X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=test%2FCodeGen%2FARM%2Fvsra.ll;h=652410e7c4ec02f268a76f2812226fb27086a3c9;hb=53131629dce542bc38af962a06ddf2ab392429b3;hp=10cefc24a21f7807d469931cfb4f1e9ae13e81b9;hpb=fce288fc9134f0f1055caf0342c023225bd5c379;p=oota-llvm.git

diff --git a/test/CodeGen/ARM/vsra.ll b/test/CodeGen/ARM/vsra.ll
index 10cefc24a21..652410e7c4e 100644
--- a/test/CodeGen/ARM/vsra.ll
+++ b/test/CodeGen/ARM/vsra.ll
@@ -1,150 +1,168 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vsra\\.s8} %t | count 2
-; RUN: grep {vsra\\.s16} %t | count 2
-; RUN: grep {vsra\\.s32} %t | count 2
-; RUN: grep {vsra\\.s64} %t | count 2
-; RUN: grep {vsra\\.u8} %t | count 2
-; RUN: grep {vsra\\.u16} %t | count 2
-; RUN: grep {vsra\\.u32} %t | count 2
-; RUN: grep {vsra\\.u64} %t | count 2
-; RUN: grep {vrsra\\.s8} %t | count 2
-; RUN: grep {vrsra\\.s16} %t | count 2
-; RUN: grep {vrsra\\.s32} %t | count 2
-; RUN: grep {vrsra\\.s64} %t | count 2
-; RUN: grep {vrsra\\.u8} %t | count 2
-; RUN: grep {vrsra\\.u16} %t | count 2
-; RUN: grep {vrsra\\.u32} %t | count 2
-; RUN: grep {vrsra\\.u64} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vsras8:
+;CHECK: vsra.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = ashr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
-	%tmp4 = add <8 x i8> %tmp1, %tmp3
+	%tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+	%tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
 }
 
 define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vsras16:
+;CHECK: vsra.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = ashr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
+	%tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
 	%tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
 }
 
 define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vsras32:
+;CHECK: vsra.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = ashr <2 x i32> %tmp2, < i32 32, i32 32 >
+	%tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
 	%tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
 }
 
 define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK-LABEL: vsras64:
+;CHECK: vsra.s64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = load <1 x i64>* %B
-	%tmp3 = ashr <1 x i64> %tmp2, < i64 64 >
+	%tmp3 = ashr <1 x i64> %tmp2, < i64 63 >
 	%tmp4 = add <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
 }
 
 define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vsraQs8:
+;CHECK: vsra.s8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
-	%tmp3 = ashr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+	%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	%tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vsraQs16:
+;CHECK: vsra.s16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
-	%tmp3 = ashr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+	%tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 	%tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vsraQs32:
+;CHECK: vsra.s32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
-	%tmp3 = ashr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
+	%tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
 	%tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: vsraQs64:
+;CHECK: vsra.s64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
-	%tmp3 = ashr <2 x i64> %tmp2, < i64 64, i64 64 >
+	%tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
 	%tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
 }
 
 define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vsrau8:
+;CHECK: vsra.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = lshr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+	%tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	%tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
 }
 
 define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vsrau16:
+;CHECK: vsra.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = lshr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
+	%tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
 	%tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
 }
 
 define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vsrau32:
+;CHECK: vsra.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = lshr <2 x i32> %tmp2, < i32 32, i32 32 >
+	%tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
 	%tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
 }
 
 define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK-LABEL: vsrau64:
+;CHECK: vsra.u64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = load <1 x i64>* %B
-	%tmp3 = lshr <1 x i64> %tmp2, < i64 64 >
+	%tmp3 = lshr <1 x i64> %tmp2, < i64 63 >
 	%tmp4 = add <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
 }
 
 define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vsraQu8:
+;CHECK: vsra.u8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
-	%tmp3 = lshr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+	%tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	%tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vsraQu16:
+;CHECK: vsra.u16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
-	%tmp3 = lshr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+	%tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 	%tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vsraQu32:
+;CHECK: vsra.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
-	%tmp3 = lshr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
+	%tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
 	%tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: vsraQu64:
+;CHECK: vsra.u64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
-	%tmp3 = lshr <2 x i64> %tmp2, < i64 64, i64 64 >
+	%tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
 	%tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
 }
 
 define <8 x i8> @vrsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vrsras8:
+;CHECK: vrsra.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
@@ -153,6 +171,8 @@ define <8 x i8> @vrsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 }
 
 define <4 x i16> @vrsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vrsras16:
+;CHECK: vrsra.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
@@ -161,6 +181,8 @@ define <4 x i16> @vrsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 }
 
 define <2 x i32> @vrsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vrsras32:
+;CHECK: vrsra.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
@@ -169,6 +191,8 @@ define <2 x i32> @vrsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 }
 
 define <1 x i64> @vrsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK-LABEL: vrsras64:
+;CHECK: vrsra.s64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = load <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
@@ -177,6 +201,8 @@ define <1 x i64> @vrsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 }
 
 define <8 x i8> @vrsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vrsrau8:
+;CHECK: vrsra.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
@@ -185,6 +211,8 @@ define <8 x i8> @vrsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 }
 
 define <4 x i16> @vrsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vrsrau16:
+;CHECK: vrsra.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
@@ -193,6 +221,8 @@ define <4 x i16> @vrsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 }
 
 define <2 x i32> @vrsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vrsrau32:
+;CHECK: vrsra.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
@@ -201,6 +231,8 @@ define <2 x i32> @vrsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 }
 
 define <1 x i64> @vrsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK-LABEL: vrsrau64:
+;CHECK: vrsra.u64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = load <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
@@ -209,6 +241,8 @@ define <1 x i64> @vrsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 }
 
 define <16 x i8> @vrsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vrsraQs8:
+;CHECK: vrsra.s8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
@@ -217,6 +251,8 @@ define <16 x i8> @vrsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 }
 
 define <8 x i16> @vrsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vrsraQs16:
+;CHECK: vrsra.s16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
@@ -225,6 +261,8 @@ define <8 x i16> @vrsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 }
 
 define <4 x i32> @vrsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vrsraQs32:
+;CHECK: vrsra.s32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
@@ -233,6 +271,8 @@ define <4 x i32> @vrsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 }
 
 define <2 x i64> @vrsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: vrsraQs64:
+;CHECK: vrsra.s64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
@@ -241,6 +281,8 @@ define <2 x i64> @vrsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 }
 
 define <16 x i8> @vrsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vrsraQu8:
+;CHECK: vrsra.u8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
@@ -249,6 +291,8 @@ define <16 x i8> @vrsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 }
 
 define <8 x i16> @vrsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vrsraQu16:
+;CHECK: vrsra.u16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
@@ -257,6 +301,8 @@ define <8 x i16> @vrsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 }
 
 define <4 x i32> @vrsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vrsraQu32:
+;CHECK: vrsra.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
@@ -265,6 +311,8 @@ define <4 x i32> @vrsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 }
 
 define <2 x i64> @vrsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: vrsraQu64:
+;CHECK: vrsra.u64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)