From: Kevin Qin
Date: Tue, 24 Dec 2013 08:16:06 +0000 (+0000)
Subject: [AArch64 NEON] Fix a bug when lowering BUILD_VECTOR.
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=4905226c1c0a1324f4df38acee0ed6bc891ed2d1;p=oota-llvm.git

[AArch64 NEON] Fix a bug when lowering BUILD_VECTOR.

DAG.getVectorShuffle() doesn't always return a vector_shuffle node. If the
mask simply selects the elements of one operand in order (for example,
operand 0 is v8i8 and the mask is 0, 1, 2, 3, 4, 5, 6, 7), it returns that
operand directly. So a check is added here.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@197967 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index f72dfe46de7..757eb0b248e 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3957,7 +3957,10 @@ bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
   if (V1.getNode() && NumElts == V0NumElts &&
       V0NumElts == V1.getValueType().getVectorNumElements()) {
     SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
-    Res = LowerVECTOR_SHUFFLE(Shuffle, DAG);
+    if(Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE)
+      Res = Shuffle;
+    else
+      Res = LowerVECTOR_SHUFFLE(Shuffle, DAG);
     return true;
   } else
     return false;
diff --git a/test/CodeGen/AArch64/neon-copy.ll b/test/CodeGen/AArch64/neon-copy.ll
index c783c00c714..881a858bc19 100644
--- a/test/CodeGen/AArch64/neon-copy.ll
+++ b/test/CodeGen/AArch64/neon-copy.ll
@@ -703,4 +703,26 @@ define <4 x i32> @testDUP.v1i32(<1 x i32> %a) {
   %e = insertelement <4 x i32> %d, i32 %b, i32 2
   %f = insertelement <4 x i32> %e, i32 %b, i32 3
   ret <4 x i32> %f
-}
\ No newline at end of file
+}
+
+define <8 x i8> @getl(<16 x i8> %x) #0 {
+; CHECK-LABEL: getl:
+; CHECK: ret
+  %vecext = extractelement <16 x i8> %x, i32 0
+  %vecinit = insertelement <8 x i8> undef, i8 %vecext, i32 0
+  %vecext1 = extractelement <16 x i8> %x, i32 1
+  %vecinit2 = insertelement <8 x i8> %vecinit, i8 %vecext1, i32 1
+  %vecext3 = extractelement <16 x i8> %x, i32 2
+  %vecinit4 = insertelement <8 x i8> %vecinit2, i8 %vecext3, i32 2
+  %vecext5 = extractelement <16 x i8> %x, i32 3
+  %vecinit6 = insertelement <8 x i8> %vecinit4, i8 %vecext5, i32 3
+  %vecext7 = extractelement <16 x i8> %x, i32 4
+  %vecinit8 = insertelement <8 x i8> %vecinit6, i8 %vecext7, i32 4
+  %vecext9 = extractelement <16 x i8> %x, i32 5
+  %vecinit10 = insertelement <8 x i8> %vecinit8, i8 %vecext9, i32 5
+  %vecext11 = extractelement <16 x i8> %x, i32 6
+  %vecinit12 = insertelement <8 x i8> %vecinit10, i8 %vecext11, i32 6
+  %vecext13 = extractelement <16 x i8> %x, i32 7
+  %vecinit14 = insertelement <8 x i8> %vecinit12, i8 %vecext13, i32 7
+  ret <8 x i8> %vecinit14
+}
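
Background on the folding that motivates the guard: SelectionDAG::getVectorShuffle()
canonicalizes an identity mask away and returns the operand itself instead of
building an ISD::VECTOR_SHUFFLE node. A minimal sketch of that case (illustrative
only, not code from this commit; it assumes a DAG, DL, V0 and V1 like the locals
in the hunk above, with V0 and V1 of type v8i8):

    // Identity mask over V0's elements: getVectorShuffle() folds this to V0
    // rather than creating a VECTOR_SHUFFLE node.
    int IdentityMask[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    SDValue S = DAG.getVectorShuffle(MVT::v8i8, DL, V0, V1, IdentityMask);
    // S is simply V0 here, so S.getOpcode() != ISD::VECTOR_SHUFFLE, and handing
    // S to LowerVECTOR_SHUFFLE(), which expects a real shuffle node, would be
    // wrong; hence the opcode check added in isKnownShuffleVector().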