From 67b453b0d1ed7ab42a9408a6eb00131e160eb421 Mon Sep 17 00:00:00 2001
From: Bob Wilson
Date: Wed, 4 Aug 2010 00:12:08 +0000
Subject: [PATCH] Combine NEON VABD (absolute difference) intrinsics with ADDs
 to make VABA (absolute difference with accumulate) intrinsics. Radar 8228576.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@110170 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/ARM/ARMISelLowering.cpp | 16 ++++++++++++++++
 test/CodeGen/ARM/vaba.ll           | 24 ++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 08054cbae32..92fb442ac75 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -4248,12 +4248,28 @@ SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
 /// operands.
 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                           TargetLowering::DAGCombinerInfo &DCI) {
+  SelectionDAG &DAG = DCI.DAG;
+
   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
   if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
     SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
     if (Result.getNode()) return Result;
   }
 
+  // fold (add (arm_neon_vabd a, b) c) -> (arm_neon_vaba c, a, b)
+  EVT VT = N->getValueType(0);
+  if (N0.getOpcode() == ISD::INTRINSIC_WO_CHAIN && VT.isInteger()) {
+    unsigned IntNo = cast<ConstantSDNode>(N0.getOperand(0))->getZExtValue();
+    if (IntNo == Intrinsic::arm_neon_vabds)
+      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), VT,
+                         DAG.getConstant(Intrinsic::arm_neon_vabas, MVT::i32),
+                         N1, N0.getOperand(1), N0.getOperand(2));
+    if (IntNo == Intrinsic::arm_neon_vabdu)
+      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), VT,
+                         DAG.getConstant(Intrinsic::arm_neon_vabau, MVT::i32),
+                         N1, N0.getOperand(1), N0.getOperand(2));
+  }
+
   return SDValue();
 }
 
diff --git a/test/CodeGen/ARM/vaba.ll b/test/CodeGen/ARM/vaba.ll
index e2dca4647bc..e7aa6aecb96 100644
--- a/test/CodeGen/ARM/vaba.ll
+++ b/test/CodeGen/ARM/vaba.ll
@@ -203,3 +203,27 @@ declare <2 x i64> @llvm.arm.neon.vabals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) n
 declare <8 x i16> @llvm.arm.neon.vabalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
 declare <4 x i32> @llvm.arm.neon.vabalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
 declare <2 x i64> @llvm.arm.neon.vabalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
+
+define <8 x i8> @vabd_combine_s8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vabd_combine_s8:
+;CHECK: vaba.s8
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+	%tmp4 = add <8 x i8> %tmp2, %tmp3
+	ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @vabd_combine_u16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vabd_combine_u16:
+;CHECK: vaba.u16
+	%tmp1 = load <4 x i16>* %A
+	%tmp2 = load <4 x i16>* %B
+	%tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+	%tmp4 = add <4 x i16> %tmp3, %tmp1
+	ret <4 x i16> %tmp4
+}
+
+declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+
-- 
2.34.1
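
Illustrative example (not part of the patch): a minimal sketch, assuming Clang
targeting 32-bit ARM with NEON and <arm_neon.h>, of the kind of source this
combine is meant to improve. The function and variable names are hypothetical.
vabd_u8 is expected to lower to the llvm.arm.neon.vabdu intrinsic and vadd_u8
to a plain vector add, so the pair previously compiled to vabd.u8 followed by
vadd.i8; with this combine the backend should be able to emit a single vaba.u8.

#include <arm_neon.h>

/* Absolute-difference-then-accumulate: the add of a vabd result is the
 * pattern PerformADDCombineWithOperands now rewrites to the vaba intrinsic
 * (exact lowering depends on the compiler version and target). */
uint8x8_t accumulate_abs_diff(uint8x8_t acc, uint8x8_t a, uint8x8_t b) {
  uint8x8_t d = vabd_u8(a, b);  /* |a - b| per lane */
  return vadd_u8(acc, d);       /* acc + |a - b| */
}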