1 ; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
2 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
3 ; RUN: -check-prefix=M2
4 ; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
5 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
6 ; RUN: -check-prefix=32R1-R2
7 ; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
8 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
9 ; RUN: -check-prefix=32R1-R2
10 ; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
11 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
12 ; RUN: -check-prefix=32R6
13 ; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
14 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
15 ; RUN: -check-prefix=M3
16 ; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
17 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
18 ; RUN: -check-prefix=GP64-NOT-R6
19 ; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
20 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
21 ; RUN: -check-prefix=GP64-NOT-R6
22 ; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
23 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
24 ; RUN: -check-prefix=GP64-NOT-R6
25 ; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
26 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
27 ; RUN: -check-prefix=64R6
29 define signext i1 @lshr_i1(i1 signext %a, i1 signext %b) {
39 define zeroext i8 @lshr_i8(i8 zeroext %a, i8 zeroext %b) {
43 ; ALL: srlv $[[T0:[0-9]+]], $4, $5
44 ; ALL: andi $2, $[[T0]], 255
50 define zeroext i16 @lshr_i16(i16 zeroext %a, i16 zeroext %b) {
52 ; ALL-LABEL: lshr_i16:
54 ; ALL: srlv $[[T0:[0-9]+]], $4, $5
55 ; ALL: andi $2, $[[T0]], 65535
61 define signext i32 @lshr_i32(i32 signext %a, i32 signext %b) {
63 ; ALL-LABEL: lshr_i32:
65 ; ALL: srlv $2, $4, $5
71 define signext i64 @lshr_i64(i64 signext %a, i64 signext %b) {
73 ; ALL-LABEL: lshr_i64:
75 ; M2: srlv $[[T0:[0-9]+]], $4, $7
76 ; M2: andi $[[T1:[0-9]+]], $7, 32
77 ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
78 ; M2: move $3, $[[T0]]
79 ; M2: srlv $[[T2:[0-9]+]], $5, $7
80 ; M2: not $[[T3:[0-9]+]], $7
81 ; M2: sll $[[T4:[0-9]+]], $4, 1
82 ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
83 ; M2: or $3, $[[T5]], $[[T2]]
85 ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
86 ; M2: addiu $2, $zero, 0
87 ; M2: move $2, $[[T0]]
92 ; 32R1-R2: srlv $[[T0:[0-9]+]], $5, $7
93 ; 32R1-R2: not $[[T1:[0-9]+]], $7
94 ; 32R1-R2: sll $[[T2:[0-9]+]], $4, 1
95 ; 32R1-R2: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
96 ; 32R1-R2: or $3, $[[T3]], $[[T0]]
97 ; 32R1-R2: srlv $[[T4:[0-9]+]], $4, $7
98 ; 32R1-R2: andi $[[T5:[0-9]+]], $7, 32
99 ; 32R1-R2: movn $3, $[[T4]], $[[T5]]
101 ; 32R1-R2: movn $2, $zero, $[[T5]]
103 ; 32R6: srlv $[[T0:[0-9]+]], $5, $7
104 ; 32R6: not $[[T1:[0-9]+]], $7
105 ; 32R6: sll $[[T2:[0-9]+]], $4, 1
106 ; 32R6: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
107 ; 32R6: or $[[T4:[0-9]+]], $[[T3]], $[[T0]]
108 ; 32R6: andi $[[T5:[0-9]+]], $7, 32
109 ; 32R6: seleqz $[[T6:[0-9]+]], $[[T4]], $[[T5]]
110 ; 32R6: srlv $[[T7:[0-9]+]], $4, $7
111 ; 32R6: selnez $[[T8:[0-9]+]], $[[T7]], $[[T5]]
112 ; 32R6: or $3, $[[T8]], $[[T6]]
114 ; 32R6: seleqz $2, $[[T7]], $[[T5]]
116 ; GP64: sll $[[T0:[0-9]+]], $5, 0
117 ; GP64: dsrlv $2, $4, $[[T0]]
123 define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
125 ; ALL-LABEL: lshr_i128:
127 ; GP32: lw $25, %call16(__lshrti3)($gp)
129 ; M3: sll $[[T0:[0-9]+]], $7, 0
130 ; M3: dsrlv $[[T1:[0-9]+]], $4, $[[T0]]
131 ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
132 ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
133 ; M3: move $3, $[[T1]]
134 ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
135 ; M3: dsll $[[T5:[0-9]+]], $4, 1
136 ; M3: not $[[T6:[0-9]+]], $[[T0]]
137 ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
138 ; M3: or $3, $[[T7]], $[[T4]]
140 ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
141 ; M3: daddiu $2, $zero, 0
142 ; M3: move $2, $[[T1]]
147 ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
148 ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
149 ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
150 ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
151 ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
152 ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
153 ; GP64-NOT-R6: dsrlv $2, $4, $[[T0]]
154 ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
155 ; GP64-NOT-R6: movn $3, $2, $[[T5]]
156 ; GP64-NOT-R6: jr $ra
157 ; GP64-NOT-R6: movn $2, $zero, $1
159 ; 64R6: sll $[[T0:[0-9]+]], $7, 0
160 ; 64R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
161 ; 64R6: dsll $[[T2:[0-9]+]], $4, 1
162 ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
163 ; 64R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
164 ; 64R6: or $[[T5:[0-9]+]], $[[T4]], $[[T1]]
165 ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 32
166 ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
167 ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
168 ; 64R6: dsrlv $[[T9:[0-9]+]], $4, $[[T0]]
169 ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
170 ; 64R6: or $3, $[[T10]], $[[T8]]
172 ; 64R6: seleqz $2, $[[T9]], $[[T7]]
174 %r = lshr i128 %a, %b