; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
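
; Test that a sign- or zero-extend followed by a vector shift left by an
; in-range constant is selected to a single NEON vshll instruction.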
define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshlls8:
;CHECK: vshll.s8
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %sext = sext <8 x i8> %tmp1 to <8 x i16>
  %shift = shl <8 x i16> %sext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
  ret <8 x i16> %shift
}

define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshlls16:
;CHECK: vshll.s16
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %sext = sext <4 x i16> %tmp1 to <4 x i32>
  %shift = shl <4 x i32> %sext, <i32 15, i32 15, i32 15, i32 15>
  ret <4 x i32> %shift
}

define <2 x i64> @vshlls32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshlls32:
;CHECK: vshll.s32
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %sext = sext <2 x i32> %tmp1 to <2 x i64>
  %shift = shl <2 x i64> %sext, <i64 31, i64 31>
  ret <2 x i64> %shift
}

define <8 x i16> @vshllu8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshllu8:
;CHECK: vshll.u8
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %zext = zext <8 x i8> %tmp1 to <8 x i16>
  %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
  ret <8 x i16> %shift
}

define <4 x i32> @vshllu16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshllu16:
;CHECK: vshll.u16
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %zext = zext <4 x i16> %tmp1 to <4 x i32>
  %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
  ret <4 x i32> %shift
}

define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshllu32:
;CHECK: vshll.u32
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %zext = zext <2 x i32> %tmp1 to <2 x i64>
  %shift = shl <2 x i64> %zext, <i64 31, i64 31>
  ret <2 x i64> %shift
}

; The following tests use the maximum shift count, so the signedness is
; irrelevant. Test both signed and unsigned versions.
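; (With the maximum count the extension bits are shifted out entirely: for an
; i8 %x, sext and zext to i16 followed by shl #8 both leave %x in the high byte
; and zeros below, so a single vshll.i8 serves either form.)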
define <8 x i16> @vshlli8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshlli8:
;CHECK: vshll.i8
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %sext = sext <8 x i8> %tmp1 to <8 x i16>
  %shift = shl <8 x i16> %sext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  ret <8 x i16> %shift
}

define <4 x i32> @vshlli16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshlli16:
;CHECK: vshll.i16
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %zext = zext <4 x i16> %tmp1 to <4 x i32>
  %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %shift
}

define <2 x i64> @vshlli32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshlli32:
;CHECK: vshll.i32
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %zext = zext <2 x i32> %tmp1 to <2 x i64>
  %shift = shl <2 x i64> %zext, <i64 32, i64 32>
  ret <2 x i64> %shift
}

; And these have a shift just out of range so separate vmovl and vshl
; instructions are needed.
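; (For example, an i16 -> i32 extend shifted by 17 is one past what vshll can
; encode, so the expected lowering is a vmovl widen followed by a plain
; vshl.i32 on the wider type.)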
define <8 x i16> @vshllu8_bad(<8 x i8>* %A) nounwind {
; CHECK-LABEL: vshllu8_bad:
; CHECK: vmovl.u8
; CHECK: vshl.i16
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %zext = zext <8 x i8> %tmp1 to <8 x i16>
  %shift = shl <8 x i16> %zext, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
  ret <8 x i16> %shift
}

define <4 x i32> @vshlls16_bad(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vshlls16_bad:
; CHECK: vmovl.s16
; CHECK: vshl.i32
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %sext = sext <4 x i16> %tmp1 to <4 x i32>
  %shift = shl <4 x i32> %sext, <i32 17, i32 17, i32 17, i32 17>
  ret <4 x i32> %shift
}

define <2 x i64> @vshllu32_bad(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vshllu32_bad:
; CHECK: vmovl.u32
; CHECK: vshl.i64
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %zext = zext <2 x i32> %tmp1 to <2 x i64>
  %shift = shl <2 x i64> %zext, <i64 33, i64 33>
  ret <2 x i64> %shift
}