1 ; None of these functions should contain algebraic right shifts
2 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srawi
4 int %test1(uint %mode.0.i.0) {
5 %tmp.79 = cast uint %mode.0.i.0 to int ; <sbyte> [#uses=1]
6 %tmp.80 = shr int %tmp.79, ubyte 15 ; <int> [#uses=1]
7 %tmp.81 = and int %tmp.80, 24 ; <int> [#uses=1]
11 int %test2(uint %mode.0.i.0) {
12 %tmp.79 = cast uint %mode.0.i.0 to int ; <sbyte> [#uses=1]
13 %tmp.80 = shr int %tmp.79, ubyte 15 ; <int> [#uses=1]
14 %tmp.81 = shr uint %mode.0.i.0, ubyte 16
15 %tmp.82 = cast uint %tmp.81 to int
16 %tmp.83 = and int %tmp.80, %tmp.82 ; <int> [#uses=1]
20 uint %test3(int %specbits.6.1) {
21 %tmp.2540 = shr int %specbits.6.1, ubyte 11 ; <int> [#uses=1]
22 %tmp.2541 = cast int %tmp.2540 to uint ; <uint> [#uses=1]
23 %tmp.2542 = shl uint %tmp.2541, ubyte 13 ; <uint> [#uses=1]
24 %tmp.2543 = and uint %tmp.2542, 8192 ; <uint> [#uses=1]