-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
+; RUN: llvm-as < %s | llc -march=arm &&
+; RUN: llvm-as < %s | llc -march=arm | grep rrx | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep __ashldi3 &&
+; RUN: llvm-as < %s | llc -march=arm | grep __ashrdi3 &&
+; RUN: llvm-as < %s | llc -march=arm | grep __lshrdi3 &&
+; RUN: llvm-as < %s | llc -march=arm -enable-thumb
; Removed: the pre-llvm-upgrade (old-syntax) form of this test.
-long %foo0(long %A, ulong %B) {
- %tmp = cast long %A to ulong ; <ulong> [#uses=1]
- %tmp2 = shr ulong %B, ubyte 1 ; <ulong> [#uses=1]
- %tmp3 = sub ulong %tmp, %tmp2 ; <ulong> [#uses=1]
- %tmp3 = cast ulong %tmp3 to long ; <long> [#uses=1]
- ret long %tmp3
; Added: native-syntax replacement. Computes A - (B >> 1) on i64; the
; single-bit logical right shift of a 64-bit value is what the
; "grep rrx | wc -l | grep 1" RUN line above expects to be emitted as
; exactly one ARM rrx (rotate-right-with-extend) instruction.
+define i64 @f00(i64 %A, i64 %B) {
; NOTE(review): bitcast of i64 to i64 is a no-op, a mechanical carry-over
; of the old "cast long %A to ulong" line above.
+ %tmp = bitcast i64 %A to i64
; Shift amount is typed i8 — LLVM 2.0-era syntax, where shift counts
; were not required to match the shifted value's type.
+ %tmp2 = lshr i64 %B, i8 1
+ %tmp3 = sub i64 %tmp, %tmp2
+ ret i64 %tmp3
}
; Added: 64-bit shift-left by a variable amount, truncated to i32.
; The "grep __ashldi3" RUN line above expects this variable-count i64
; shl to be lowered to a call to the __ashldi3 runtime helper.
+define i32 @f1(i64 %x, i8 %y) {
+ %a = shl i64 %x, i8 %y
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
+
; Added: 64-bit arithmetic shift-right by a variable amount, truncated
; to i32. The "grep __ashrdi3" RUN line above expects this to be
; lowered to a call to the __ashrdi3 runtime helper.
+define i32 @f2(i64 %x, i8 %y) {
+ %a = ashr i64 %x, i8 %y
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
+
; Added: 64-bit logical shift-right by a variable amount, truncated to
; i32. The "grep __lshrdi3" RUN line above expects this to be lowered
; to a call to the __lshrdi3 runtime helper.
+define i32 @f3(i64 %x, i8 %y) {
+ %a = lshr i64 %x, i8 %y
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
+++ /dev/null
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep __ashldi3 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep __ashrdi3 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep __lshrdi3
; Removed with the deleted file: old-syntax (llvm-upgrade) version of
; the shl/__ashldi3 test, superseded by @f1 above.
-uint %f1(ulong %x, ubyte %y) {
-entry:
- %a = shl ulong %x, ubyte %y
- %b = cast ulong %a to uint
- ret uint %b
-}
-
; Removed with the deleted file: old-syntax version of the signed-shr
; (__ashrdi3) test — "shr" on the signed "long" type — superseded by
; the ashr in @f2 above.
-uint %f2(long %x, ubyte %y) {
-entry:
- %a = shr long %x, ubyte %y
- %b = cast long %a to uint
- ret uint %b
-}
-
; Removed with the deleted file: old-syntax version of the unsigned-shr
; (__lshrdi3) test — "shr" on the unsigned "ulong" type — superseded by
; the lshr in @f3 above.
-uint %f3(ulong %x, ubyte %y) {
-entry:
- %a = shr ulong %x, ubyte %y
- %b = cast ulong %a to uint
- ret uint %b
-}