1 ; All of these ands and shifts should be folded into rlwimi instructions
2 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -o %t -f
4 ; RUN: not grep srawi %t
5 ; RUN: not grep srwi %t
6 ; RUN: not grep slwi %t
7 ; RUN: grep rlwinm %t | count 8
9 implementation ; Functions:
13 %tmp.1 = and int %a, 268431360 ; <int> [#uses=1]
19 %tmp.1 = and int %a, -268435441 ; <int> [#uses=1]
25 %tmp.2 = shr int %a, ubyte 8 ; <int> [#uses=1]
26 %tmp.3 = and int %tmp.2, 255 ; <int> [#uses=1]
30 uint %test4(uint %a) {
32 %tmp.3 = shr uint %a, ubyte 8 ; <uint> [#uses=1]
33 %tmp.4 = and uint %tmp.3, 255 ; <uint> [#uses=1]
39 %tmp.2 = shl int %a, ubyte 8 ; <int> [#uses=1]
40 %tmp.3 = and int %tmp.2, -8388608 ; <int> [#uses=1]
46 %tmp.1 = and int %a, 65280 ; <int> [#uses=1]
47 %tmp.2 = shr int %tmp.1, ubyte 8 ; <uint> [#uses=1]
51 uint %test7(uint %a) {
53 %tmp.1 = and uint %a, 65280 ; <uint> [#uses=1]
54 %tmp.2 = shr uint %tmp.1, ubyte 8 ; <uint> [#uses=1]
60 %tmp.1 = and int %a, 16711680 ; <int> [#uses=1]
61 %tmp.2 = shl int %tmp.1, ubyte 8 ; <int> [#uses=1]