; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu | FileCheck %s
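
; This test checks that logical operations (and/or/xor), including ones whose
; second operand is negated, are selected as the AArch64 register forms
; and/orr/eor and bic/orn/eon, that shifted operands (lsl/lsr/asr/ror) are
; folded into those instructions, and that a flag-setting and against zero is
; selected as tst.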
@var1_32 = global i32 0
@var2_32 = global i32 0

@var1_64 = global i64 0
@var2_64 = global i64 0

define void @logical_32bit() minsize {
; CHECK-LABEL: logical_32bit:
  %val1 = load i32* @var1_32
  %val2 = load i32* @var2_32

; First check basic and/bic/or/orn/eor/eon patterns with no shift
  %neg_val2 = xor i32 -1, %val2
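; (%neg_val2 is the bitwise NOT of %val2; BIC, ORN and EON compute Rn AND NOT Rm,
; Rn ORR NOT Rm and Rn EOR NOT Rm, so the xor with -1 is expected to fold into
; the "N" form rather than being materialised separately.)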

  %and_noshift = and i32 %val1, %val2
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %and_noshift, i32* @var1_32
  %bic_noshift = and i32 %neg_val2, %val1
; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %bic_noshift, i32* @var1_32

  %or_noshift = or i32 %val1, %val2
; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %or_noshift, i32* @var1_32
  %orn_noshift = or i32 %neg_val2, %val1
; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %orn_noshift, i32* @var1_32

  %xor_noshift = xor i32 %val1, %val2
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %xor_noshift, i32* @var1_32
  %xorn_noshift = xor i32 %neg_val2, %val1
; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %xorn_noshift, i32* @var1_32

; Check the maximum shift on each
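; (Shifted-register operands on W registers encode shift amounts 0-31, so
; lsl #31 is the largest shift that can be folded here.)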
  %operand_lsl31 = shl i32 %val2, 31
  %neg_operand_lsl31 = xor i32 -1, %operand_lsl31

  %and_lsl31 = and i32 %val1, %operand_lsl31
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %and_lsl31, i32* @var1_32
  %bic_lsl31 = and i32 %val1, %neg_operand_lsl31
; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %bic_lsl31, i32* @var1_32

  %or_lsl31 = or i32 %val1, %operand_lsl31
; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %or_lsl31, i32* @var1_32
  %orn_lsl31 = or i32 %val1, %neg_operand_lsl31
; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %orn_lsl31, i32* @var1_32

  %xor_lsl31 = xor i32 %val1, %operand_lsl31
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %xor_lsl31, i32* @var1_32
  %xorn_lsl31 = xor i32 %val1, %neg_operand_lsl31
; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %xorn_lsl31, i32* @var1_32

; Check other shifts on a subset
  %operand_asr10 = ashr i32 %val2, 10
  %neg_operand_asr10 = xor i32 -1, %operand_asr10

  %bic_asr10 = and i32 %val1, %neg_operand_asr10
; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
  store volatile i32 %bic_asr10, i32* @var1_32
  %xor_asr10 = xor i32 %val1, %operand_asr10
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
  store volatile i32 %xor_asr10, i32* @var1_32

  %operand_lsr1 = lshr i32 %val2, 1
  %neg_operand_lsr1 = xor i32 -1, %operand_lsr1

  %orn_lsr1 = or i32 %val1, %neg_operand_lsr1
; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
  store volatile i32 %orn_lsr1, i32* @var1_32
  %xor_lsr1 = xor i32 %val1, %operand_lsr1
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
  store volatile i32 %xor_lsr1, i32* @var1_32

  %operand_ror20_big = shl i32 %val2, 12
  %operand_ror20_small = lshr i32 %val2, 20
  %operand_ror20 = or i32 %operand_ror20_big, %operand_ror20_small
  %neg_operand_ror20 = xor i32 -1, %operand_ror20
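; (shl by 12 ORed with lshr by 20 of the same i32 value is a rotate right by 20,
; since 12 + 20 == 32; DAGCombiner should turn this into a ROTR node so the
; ror-shifted operand form can be used below.)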

  %xorn_ror20 = xor i32 %val1, %neg_operand_ror20
; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
  store volatile i32 %xorn_ror20, i32* @var1_32
  %and_ror20 = and i32 %val1, %operand_ror20
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
  store volatile i32 %and_ror20, i32* @var1_32
  ret void
}

define void @logical_64bit() minsize {
; CHECK-LABEL: logical_64bit:
  %val1 = load i64* @var1_64
  %val2 = load i64* @var2_64

; First check basic and/bic/or/orn/eor/eon patterns with no shift
  %neg_val2 = xor i64 -1, %val2

  %and_noshift = and i64 %val1, %val2
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %and_noshift, i64* @var1_64
  %bic_noshift = and i64 %neg_val2, %val1
; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %bic_noshift, i64* @var1_64

  %or_noshift = or i64 %val1, %val2
; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %or_noshift, i64* @var1_64
  %orn_noshift = or i64 %neg_val2, %val1
; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %orn_noshift, i64* @var1_64

  %xor_noshift = xor i64 %val1, %val2
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %xor_noshift, i64* @var1_64
  %xorn_noshift = xor i64 %neg_val2, %val1
; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %xorn_noshift, i64* @var1_64

; Check the maximum shift on each
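; (For X registers the shifted operand encodes amounts 0-63, so lsl #63 is the
; maximum here.)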
  %operand_lsl63 = shl i64 %val2, 63
  %neg_operand_lsl63 = xor i64 -1, %operand_lsl63

  %and_lsl63 = and i64 %val1, %operand_lsl63
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %and_lsl63, i64* @var1_64
  %bic_lsl63 = and i64 %val1, %neg_operand_lsl63
; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %bic_lsl63, i64* @var1_64

  %or_lsl63 = or i64 %val1, %operand_lsl63
; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %or_lsl63, i64* @var1_64
  %orn_lsl63 = or i64 %val1, %neg_operand_lsl63
; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %orn_lsl63, i64* @var1_64

  %xor_lsl63 = xor i64 %val1, %operand_lsl63
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %xor_lsl63, i64* @var1_64
  %xorn_lsl63 = xor i64 %val1, %neg_operand_lsl63
; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %xorn_lsl63, i64* @var1_64

; Check other shifts on a subset
  %operand_asr10 = ashr i64 %val2, 10
  %neg_operand_asr10 = xor i64 -1, %operand_asr10

  %bic_asr10 = and i64 %val1, %neg_operand_asr10
; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
  store volatile i64 %bic_asr10, i64* @var1_64
  %xor_asr10 = xor i64 %val1, %operand_asr10
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
  store volatile i64 %xor_asr10, i64* @var1_64

  %operand_lsr1 = lshr i64 %val2, 1
  %neg_operand_lsr1 = xor i64 -1, %operand_lsr1

  %orn_lsr1 = or i64 %val1, %neg_operand_lsr1
; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
  store volatile i64 %orn_lsr1, i64* @var1_64
  %xor_lsr1 = xor i64 %val1, %operand_lsr1
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
  store volatile i64 %xor_lsr1, i64* @var1_64

; Construct a rotate-right from a bunch of other logical operations. DAGCombiner
; should ensure we produce the ROTR during DAG combining so that a ror-shifted
; operand can be selected below.
  %operand_ror20_big = shl i64 %val2, 44
  %operand_ror20_small = lshr i64 %val2, 20
  %operand_ror20 = or i64 %operand_ror20_big, %operand_ror20_small
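; (44 + 20 == 64, so the two shifts ORed together form a 64-bit rotate right by 20.)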
  %neg_operand_ror20 = xor i64 -1, %operand_ror20

  %xorn_ror20 = xor i64 %val1, %neg_operand_ror20
; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
  store volatile i64 %xorn_ror20, i64* @var1_64
  %and_ror20 = and i64 %val1, %operand_ror20
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
  store volatile i64 %and_ror20, i64* @var1_64
  ret void
}

define void @flag_setting() {
; CHECK-LABEL: flag_setting:
  %val1 = load i64* @var1_64
  %val2 = load i64* @var2_64
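; Each and below only feeds a signed comparison against zero, so it should be
; selected as a flag-setting ands; tst is the alias used when the result is
; discarded (destination wzr/xzr), and shifted operands fold in here as well.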

; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}
  %simple_and = and i64 %val1, %val2
  %tst1 = icmp sgt i64 %simple_and, 0
  br i1 %tst1, label %ret, label %test2

test2:
; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  %shifted_op = shl i64 %val2, 63
  %shifted_and = and i64 %val1, %shifted_op
  %tst2 = icmp slt i64 %shifted_and, 0
  br i1 %tst2, label %ret, label %test3

test3:
; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, asr #12
  %asr_op = ashr i64 %val2, 12
  %asr_and = and i64 %asr_op, %val1
  %tst3 = icmp sgt i64 %asr_and, 0
  br i1 %tst3, label %ret, label %other_exit

other_exit:
  store volatile i64 %val1, i64* @var1_64
  ret void

ret:
  ret void
}