1 ; All of these ands and shifts should be folded into rlwimi instructions
2 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep and &&
3 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srawi &&
4 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srwi &&
5 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep slwi &&
6 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep rlwinm | wc -l | grep 8
8 implementation ; Functions:
12 %tmp.1 = and int %a, 268431360 ; <int> [#uses=1]
18 %tmp.1 = and int %a, -268435441 ; <int> [#uses=1]
24 %tmp.2 = shr int %a, ubyte 8 ; <int> [#uses=1]
25 %tmp.3 = and int %tmp.2, 255 ; <int> [#uses=1]
29 uint %test4(uint %a) {
31 %tmp.3 = shr uint %a, ubyte 8 ; <uint> [#uses=1]
32 %tmp.4 = and uint %tmp.3, 255 ; <uint> [#uses=1]
38 %tmp.2 = shl int %a, ubyte 8 ; <int> [#uses=1]
39 %tmp.3 = and int %tmp.2, -8388608 ; <int> [#uses=1]
45 %tmp.1 = and int %a, 65280 ; <int> [#uses=1]
46 %tmp.2 = shr int %tmp.1, ubyte 8 ; <uint> [#uses=1]
50 uint %test7(uint %a) {
52 %tmp.1 = and uint %a, 65280 ; <uint> [#uses=1]
53 %tmp.2 = shr uint %tmp.1, ubyte 8 ; <uint> [#uses=1]
59 %tmp.1 = and int %a, 16711680 ; <int> [#uses=1]
60 %tmp.2 = shl int %tmp.1, ubyte 8 ; <int> [#uses=1]