//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}
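
// Note (illustrative): marking an operation Promote together with
// AddPromotedToType makes the legalizer perform it in the wider "promoted"
// type via bitcasts. For example, with the settings above an AND of two v8i8
// values is bitcast to v2i32, ANDed there, and bitcast back, so only one
// vector bitwise pattern per register width needs to exist.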

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
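      // Note (illustrative): these *vfp helpers return an integer flag, and
      // setCmpLibcallCC records how that flag is turned back into a boolean.
      // E.g. an ordered-equal compare becomes "call __eqsf2vfp; result != 0",
      // while O_F32 reuses __unordsf2vfp and tests "result == 0".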

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);
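    // Note (illustrative): the RTABI has no "unordered or not-equal" entry
    // point, so UNE reuses __aeabi_dcmpeq and simply inverts the test
    // (result == 0), while OEQ uses the same helper with result != 0.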

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32,  "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
    setLibcallName(RTLIB::O_F32,   "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64,   "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64,  "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64,  "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64,  "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }
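  // Note (illustrative): per the RTABI, __aeabi_ldivmod / __aeabi_uldivmod
  // return both the 64-bit quotient (r0:r1) and remainder (r2:r3), so one
  // entry point serves the 64-bit division libcalls above.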

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
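    // Note (illustrative): the Custom MUL handling lets lowering recognize a
    // multiply whose operands are sign/zero-extended from a narrower vector
    // and emit ARMISD::VMULLs / VMULLu (NEON VMULL) instead of a full-width
    // multiply.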

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL,   MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL,   MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
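  // Note (illustrative): in ARM/Thumb2 mode UMULL/SMULL cover the
  // UMUL_LOHI/SMUL_LOHI nodes that the MULHU/MULHS expansions produce, and
  // ARMv6 adds SMMUL for the signed high word, which is presumably why MULHS
  // is only expanded on pre-v6 cores; Thumb1 has no long multiply at all.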
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
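  // Note (illustrative): expanding the atomic operations makes the legalizer
  // emit __sync_* runtime calls; since those calls already provide the
  // required barrier semantics, setShouldFoldAtomicFences(true) lets adjacent
  // fence nodes be folded into them instead of emitting extra barrier code.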

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
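  // Note (illustrative): with SETCC expanded, boolean results go through the
  // custom SELECT_CC lowering instead, and SELECT_CC / BR_CC are turned into
  // ARMISD::CMOV and conditional-branch nodes that consume the CPSR flags set
  // by an ARMISD::CMP.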

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}
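
// Note (illustrative): e.g. a v4i32 (one Q register) is reported as the DPR
// class with cost 2, so the register-pressure heuristics treat it as
// occupying two D registers.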

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::VMULLs:        return "ARMISD::VMULLs";
  case ARMISD::VMULLu:        return "ARMISD::VMULLu";
  case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX:          return "ARMISD::FMAX";
  case ARMISD::FMIN:          return "ARMISD::FMIN";
  case ARMISD::BFI:           return "ARMISD::BFI";
  case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
  case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
  }
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}
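
// Note (illustrative): the limits roughly track the reachable immediate
// offsets of the load/store forms: Thumb1 encodings only allow small
// offsets, while ARM/Thumb2 LDR/STR accept a 12-bit (0-4095) offset.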

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

// FIXME: Move to RegInfo
unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
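
// Note (illustrative): the unsigned predicates map onto the carry-based ARM
// conditions, e.g. SETULT becomes LO (unsigned lower, carry clear) and
// SETUGE becomes HS (unsigned higher-or-same, carry set).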

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
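
// Note (illustrative): some FP predicates need two ARM conditions; e.g.
// SETONE ("ordered and not equal") is tested as MI-or-GT, and SETUEQ as
// EQ-or-VS, which is why callers also have to check CondCode2.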

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}
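
// Note (illustrative): e.g. on an AAPCS target built with -float-abi=hard and
// VFP2 available, a plain C call selects CC_ARM_AAPCS_VFP, so floating-point
// arguments are assigned to s/d registers rather than being passed in GPRs.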

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
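
// Note (illustrative): the needsCustom() path above covers the "f64 in two
// GPRs" return convention: the two i32 halves are copied out of their
// registers and rebuilt with ARMISD::VMOVDRR, and a v2f64 result is rebuilt
// one f64 lane at a time the same way.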

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA, Flags));
  }
}
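
// Note (illustrative): ARMISD::VMOVRRD splits the f64 into two i32 results;
// the first half always lands in a register, and the second half either takes
// the next register or, if registers are exhausted, is stored to its stack
// slot via LowerMemOpCallTo.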

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      // On ELF targets for PIC code, direct calls should go through the PLT
      unsigned OpFlags = 0;
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      unsigned OpFlags = 0;
      // On ELF targets for PIC code, direct calls should go through the PLT
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
    }
  }
1406 // FIXME: handle tail calls differently.
1408 if (Subtarget->isThumb()) {
1409 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
1410 CallOpc = ARMISD::CALL_NOLINK;
1412 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
1414 CallOpc = (isDirect || Subtarget->hasV5TOps())
1415 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
1416 : ARMISD::CALL_NOLINK;
1419 std::vector<SDValue> Ops;
1420 Ops.push_back(Chain);
1421 Ops.push_back(Callee);
1423 // Add argument registers to the end of the list so that they are known live into the call.
1425 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1426 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1427 RegsToPass[i].second.getValueType()));
1429 if (InFlag.getNode())
1430 Ops.push_back(InFlag);
1432 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1434 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
1436 // Returns a chain and a flag for retval copy to use.
1437 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
1438 InFlag = Chain.getValue(1);
1440 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1441 DAG.getIntPtrConstant(0, true), InFlag);
1443 InFlag = Chain.getValue(1);
1445 // Handle result values, copying them out of physregs into vregs that we return.
1447 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
1451 /// MatchingStackOffset - Return true if the given stack call argument is
1452 /// already available in the same position (relatively) of the caller's
1453 /// incoming argument stack.
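/// For example (illustrative only): if the caller itself received an i32 at
/// [sp, #4] and forwards that same value to the callee at stack offset 4, the
/// argument is already in place and no store is needed for the sibcall.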
1455 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
1456 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
1457 const ARMInstrInfo *TII) {
1458 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
1460 if (Arg.getOpcode() == ISD::CopyFromReg) {
1461 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
1462 if (!TargetRegisterInfo::isVirtualRegister(VR))
1464 MachineInstr *Def = MRI->getVRegDef(VR);
1467 if (!Flags.isByVal()) {
1468 if (!TII->isLoadFromStackSlot(Def, FI))
1473 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
1474 if (Flags.isByVal())
1475 // ByVal argument is passed in as a pointer but it's now being
1476 // dereferenced. e.g.
1477 // define @foo(%struct.X* %A) {
1478 //   tail call @bar(%struct.X* byval %A)
// }
1481 SDValue Ptr = Ld->getBasePtr();
1482 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
1485 FI = FINode->getIndex();
1489 assert(FI != INT_MAX);
1490 if (!MFI->isFixedObjectIndex(FI))
1492 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
1495 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1496 /// for tail call optimization. Targets which want to do tail call
1497 /// optimization should implement this function.
1499 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1500 CallingConv::ID CalleeCC,
1502 bool isCalleeStructRet,
1503 bool isCallerStructRet,
1504 const SmallVectorImpl<ISD::OutputArg> &Outs,
1505 const SmallVectorImpl<SDValue> &OutVals,
1506 const SmallVectorImpl<ISD::InputArg> &Ins,
1507 SelectionDAG& DAG) const {
1508 const Function *CallerF = DAG.getMachineFunction().getFunction();
1509 CallingConv::ID CallerCC = CallerF->getCallingConv();
1510 bool CCMatch = CallerCC == CalleeCC;
1512 // Look for obvious safe cases to perform tail call optimization that do not
1513 // require ABI changes. This is what gcc calls sibcall.
1515 // Do not sibcall optimize vararg calls unless the call site is not passing any arguments.
1517 if (isVarArg && !Outs.empty())
1520 // Also avoid sibcall optimization if either caller or callee uses struct
1521 // return semantics.
1522 if (isCalleeStructRet || isCallerStructRet)
1525 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
1526 // emitEpilogue is not ready for them.
1527 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
1528 // LR. This means if we need to reload LR, it takes an extra instruction,
1529 // which outweighs the value of the tail call; but here we don't know yet
1530 // whether LR is going to be used. Probably the right approach is to
1531 // generate the tail call here and turn it back into CALL/RET in
1532 // emitEpilogue if LR is used.
1534 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
1535 // but we need to make sure there are enough registers; the only valid
1536 // registers are the 4 used for parameters. We don't currently do this case.
1538 if (Subtarget->isThumb1Only())
1541 // If the calling conventions do not match, then we'd better make sure the
1542 // results are returned in the same way as what the caller expects.
1544 SmallVector<CCValAssign, 16> RVLocs1;
1545 CCState CCInfo1(CalleeCC, false, getTargetMachine(),
1546 RVLocs1, *DAG.getContext());
1547 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
1549 SmallVector<CCValAssign, 16> RVLocs2;
1550 CCState CCInfo2(CallerCC, false, getTargetMachine(),
1551 RVLocs2, *DAG.getContext());
1552 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
1554 if (RVLocs1.size() != RVLocs2.size())
1556 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1557 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1559 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1561 if (RVLocs1[i].isRegLoc()) {
1562 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1565 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1571 // If the callee takes no arguments then go on to check the results of the call.
1573 if (!Outs.empty()) {
1574 // Check if stack adjustment is needed. For now, do not do this if any
1575 // argument is passed on the stack.
1576 SmallVector<CCValAssign, 16> ArgLocs;
1577 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
1578 ArgLocs, *DAG.getContext());
1579 CCInfo.AnalyzeCallOperands(Outs,
1580 CCAssignFnForNode(CalleeCC, false, isVarArg));
1581 if (CCInfo.getNextStackOffset()) {
1582 MachineFunction &MF = DAG.getMachineFunction();
1584 // Check if the arguments are already laid out in the right way as
1585 // the caller's fixed stack objects.
1586 MachineFrameInfo *MFI = MF.getFrameInfo();
1587 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1588 const ARMInstrInfo *TII =
1589 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
1590 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1592 ++i, ++realArgIdx) {
1593 CCValAssign &VA = ArgLocs[i];
1594 EVT RegVT = VA.getLocVT();
1595 SDValue Arg = OutVals[realArgIdx];
1596 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1597 if (VA.getLocInfo() == CCValAssign::Indirect)
1599 if (VA.needsCustom()) {
1600 // f64 and vector types are split into multiple registers or
1601 // register/stack-slot combinations. The types will not match
1602 // the registers; give up on memory f64 refs until we figure
1603 // out what to do about this.
1606 if (!ArgLocs[++i].isRegLoc())
1608 if (RegVT == MVT::v2f64) {
1609 if (!ArgLocs[++i].isRegLoc())
1611 if (!ArgLocs[++i].isRegLoc())
1614 } else if (!VA.isRegLoc()) {
1615 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
1627 ARMTargetLowering::LowerReturn(SDValue Chain,
1628 CallingConv::ID CallConv, bool isVarArg,
1629 const SmallVectorImpl<ISD::OutputArg> &Outs,
1630 const SmallVectorImpl<SDValue> &OutVals,
1631 DebugLoc dl, SelectionDAG &DAG) const {
1633 // CCValAssign - represent the assignment of the return value to a location.
1634 SmallVector<CCValAssign, 16> RVLocs;
1636 // CCState - Info about the registers and stack slots.
1637 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
1640 // Analyze outgoing return values.
1641 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
1644 // If this is the first return lowered for this function, add
1645 // the regs to the liveout set for the function.
1646 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1647 for (unsigned i = 0; i != RVLocs.size(); ++i)
1648 if (RVLocs[i].isRegLoc())
1649 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1654 // Copy the result values into the output registers.
1655 for (unsigned i = 0, realRVLocIdx = 0;
1657 ++i, ++realRVLocIdx) {
1658 CCValAssign &VA = RVLocs[i];
1659 assert(VA.isRegLoc() && "Can only return in registers!");
1661 SDValue Arg = OutVals[realRVLocIdx];
1663 switch (VA.getLocInfo()) {
1664 default: llvm_unreachable("Unknown loc info!");
1665 case CCValAssign::Full: break;
1666 case CCValAssign::BCvt:
1667 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1671 if (VA.needsCustom()) {
1672 if (VA.getLocVT() == MVT::v2f64) {
1673 // Extract the first half and return it in two registers.
1674 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1675 DAG.getConstant(0, MVT::i32));
1676 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
1677 DAG.getVTList(MVT::i32, MVT::i32), Half);
1679 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
1680 Flag = Chain.getValue(1);
1681 VA = RVLocs[++i]; // skip ahead to next loc
1682 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1683 HalfGPRs.getValue(1), Flag);
1684 Flag = Chain.getValue(1);
1685 VA = RVLocs[++i]; // skip ahead to next loc
1687 // Extract the 2nd half and fall through to handle it as an f64 value.
1688 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1689 DAG.getConstant(1, MVT::i32));
1691 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is available.
1693 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1694 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
1695 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
1696 Flag = Chain.getValue(1);
1697 VA = RVLocs[++i]; // skip ahead to next loc
1698 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
1701 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1703 // Guarantee that all emitted copies are glued together so that nothing
1704 // else can be scheduled in between them.
1705 Flag = Chain.getValue(1);
1710 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1712 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
1717 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
1718 if (N->getNumValues() != 1)
1720 if (!N->hasNUsesOfValue(1, 0))
1723 unsigned NumCopies = 0;
1725 SDNode *Use = *N->use_begin();
1726 if (Use->getOpcode() == ISD::CopyToReg) {
1727 Copies[NumCopies++] = Use;
1728 } else if (Use->getOpcode() == ARMISD::VMOVRRD) {
1729 // f64 returned in a pair of GPRs.
1730 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end();
1732 if (UI->getOpcode() != ISD::CopyToReg)
1734 Copies[UI.getUse().getResNo()] = *UI;
1737 } else if (Use->getOpcode() == ISD::BITCAST) {
1738 // f32 returned in a single GPR.
1739 if (!Use->hasNUsesOfValue(1, 0))
1741 Use = *Use->use_begin();
1742 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0))
1744 Copies[NumCopies++] = Use;
1749 if (NumCopies != 1 && NumCopies != 2)
1752 bool HasRet = false;
1753 for (unsigned i = 0; i < NumCopies; ++i) {
1754 SDNode *Copy = Copies[i];
1755 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
1757 if (UI->getOpcode() == ISD::CopyToReg) {
1759 if (Use == Copies[0] || Use == Copies[1])
1763 if (UI->getOpcode() != ARMISD::RET_FLAG)
1772 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
1773 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
1774 // one of the above mentioned nodes. It has to be wrapped because otherwise
1775 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
1776 // be used to form addressing mode. These wrapped nodes will be selected into MOVi.
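// For example, a ConstantPool reference becomes roughly
//   (ARMISD::Wrapper (TargetConstantPool <cp#N>))
// so that instruction selection can either fold the target node into an
// addressing mode or materialize it with a single move (sketch, not the
// exact printed form).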
1778 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
1779 EVT PtrVT = Op.getValueType();
1780 // FIXME there is no actual debug info here
1781 DebugLoc dl = Op.getDebugLoc();
1782 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1784 if (CP->isMachineConstantPoolEntry())
1785 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
1786 CP->getAlignment());
1788 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
1789 CP->getAlignment());
1790 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
1793 unsigned ARMTargetLowering::getJumpTableEncoding() const {
1794 return MachineJumpTableInfo::EK_Inline;
1797 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
1798 SelectionDAG &DAG) const {
1799 MachineFunction &MF = DAG.getMachineFunction();
1800 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1801 unsigned ARMPCLabelIndex = 0;
1802 DebugLoc DL = Op.getDebugLoc();
1803 EVT PtrVT = getPointerTy();
1804 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1805 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1807 if (RelocM == Reloc::Static) {
1808 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
1810 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
1811 ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1812 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
1813 ARMCP::CPBlockAddress,
1815 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1817 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
1818 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
1819 MachinePointerInfo::getConstantPool(),
1821 if (RelocM == Reloc::Static)
1823 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1824 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
1827 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
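// Roughly: load a PC-relative TLSGD constant-pool entry, add the PC to form
// the argument, then call __tls_get_addr(arg) and use its return value as
// the address of the variable.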
1829 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1830 SelectionDAG &DAG) const {
1831 DebugLoc dl = GA->getDebugLoc();
1832 EVT PtrVT = getPointerTy();
1833 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1834 MachineFunction &MF = DAG.getMachineFunction();
1835 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1836 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1837 ARMConstantPoolValue *CPV =
1838 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
1839 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
1840 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1841 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
1842 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
1843 MachinePointerInfo::getConstantPool(),
1845 SDValue Chain = Argument.getValue(1);
1847 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1848 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
1850 // call __tls_get_addr.
1853 Entry.Node = Argument;
1854 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
1855 Args.push_back(Entry);
1856 // FIXME: is there useful debug info available here?
1857 std::pair<SDValue, SDValue> CallResult =
1858 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
1859 false, false, false, false,
1860 0, CallingConv::C, false, /*isReturnValueUsed=*/true,
1861 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
1862 return CallResult.first;
1865 // Lower ISD::GlobalTLSAddress using the "initial exec" or
1866 // "local exec" model.
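// Roughly: for initial exec the code below loads a PC-relative GOTTPOFF
// entry and then loads the thread-pointer offset through it; for local exec
// it loads the TPOFF constant straight from the constant pool. Either way
// the offset is finally added to the thread pointer.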
1868 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
1869 SelectionDAG &DAG) const {
1870 const GlobalValue *GV = GA->getGlobal();
1871 DebugLoc dl = GA->getDebugLoc();
1873 SDValue Chain = DAG.getEntryNode();
1874 EVT PtrVT = getPointerTy();
1875 // Get the Thread Pointer
1876 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
1878 if (GV->isDeclaration()) {
1879 MachineFunction &MF = DAG.getMachineFunction();
1880 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1881 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1882 // Initial exec model.
1883 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1884 ARMConstantPoolValue *CPV =
1885 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
1886 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true);
1887 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1888 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
1889 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1890 MachinePointerInfo::getConstantPool(),
1892 Chain = Offset.getValue(1);
1894 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1895 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
1897 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1898 MachinePointerInfo::getConstantPool(),
1902 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF);
1903 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1904 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
1905 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1906 MachinePointerInfo::getConstantPool(),
1910 // The address of the thread local variable is the add of the thread
1911 // pointer with the offset of the variable.
1912 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
1916 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
1917 // TODO: implement the "local dynamic" model
1918 assert(Subtarget->isTargetELF() &&
1919 "TLS not implemented for non-ELF targets");
1920 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1921 // If the relocation model is PIC, use the "General Dynamic" TLS Model,
1922 // otherwise use the "Local Exec" TLS Model
1923 if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
1924 return LowerToTLSGeneralDynamicModel(GA, DAG);
1926 return LowerToTLSExecModels(GA, DAG);
1929 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
1930 SelectionDAG &DAG) const {
1931 EVT PtrVT = getPointerTy();
1932 DebugLoc dl = Op.getDebugLoc();
1933 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1934 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1935 if (RelocM == Reloc::PIC_) {
1936 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
1937 ARMConstantPoolValue *CPV =
1938 new ARMConstantPoolValue(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
1939 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1940 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1941 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
1943 MachinePointerInfo::getConstantPool(),
1945 SDValue Chain = Result.getValue(1);
1946 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1947 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
1949 Result = DAG.getLoad(PtrVT, dl, Chain, Result,
1950 MachinePointerInfo::getGOT(), false, false, 0);
1953 // If we have T2 ops, we can materialize the address directly via movt/movw
1954 // pair. This is always cheaper.
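// e.g. (illustrative syntax) the address of a symbol "sym" is formed as
//   movw rD, :lower16:sym
//   movt rD, :upper16:sym
// rather than being loaded from the constant pool.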
1955 if (Subtarget->useMovt()) {
1956 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
1957 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
1959 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
1960 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1961 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1962 MachinePointerInfo::getConstantPool(),
1968 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
1969 SelectionDAG &DAG) const {
1970 MachineFunction &MF = DAG.getMachineFunction();
1971 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1972 unsigned ARMPCLabelIndex = 0;
1973 EVT PtrVT = getPointerTy();
1974 DebugLoc dl = Op.getDebugLoc();
1975 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1976 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1978 if (RelocM == Reloc::Static)
1979 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
1981 ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1982 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
1983 ARMConstantPoolValue *CPV =
1984 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj);
1985 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1987 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1989 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1990 MachinePointerInfo::getConstantPool(),
1992 SDValue Chain = Result.getValue(1);
1994 if (RelocM == Reloc::PIC_) {
1995 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1996 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1999 if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
2000 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
2006 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
2007 SelectionDAG &DAG) const {
2008 assert(Subtarget->isTargetELF() &&
2009 "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
2010 MachineFunction &MF = DAG.getMachineFunction();
2011 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2012 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
2013 EVT PtrVT = getPointerTy();
2014 DebugLoc dl = Op.getDebugLoc();
2015 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2016 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
2017 "_GLOBAL_OFFSET_TABLE_",
2018 ARMPCLabelIndex, PCAdj);
2019 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2020 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2021 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2022 MachinePointerInfo::getConstantPool(),
2024 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2025 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2029 ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG)
2031 DebugLoc dl = Op.getDebugLoc();
2032 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
2033 Op.getOperand(0), Op.getOperand(1));
2037 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
2038 DebugLoc dl = Op.getDebugLoc();
2039 SDValue Val = DAG.getConstant(0, MVT::i32);
2040 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0),
2041 Op.getOperand(1), Val);
2045 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
2046 DebugLoc dl = Op.getDebugLoc();
2047 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
2048 Op.getOperand(1), DAG.getConstant(0, MVT::i32));
2052 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
2053 const ARMSubtarget *Subtarget) const {
2054 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2055 DebugLoc dl = Op.getDebugLoc();
2057 default: return SDValue(); // Don't custom lower most intrinsics.
2058 case Intrinsic::arm_thread_pointer: {
2059 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2060 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2062 case Intrinsic::eh_sjlj_lsda: {
2063 MachineFunction &MF = DAG.getMachineFunction();
2064 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2065 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
2066 EVT PtrVT = getPointerTy();
2067 DebugLoc dl = Op.getDebugLoc();
2068 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2070 unsigned PCAdj = (RelocM != Reloc::PIC_)
2071 ? 0 : (Subtarget->isThumb() ? 4 : 8);
2072 ARMConstantPoolValue *CPV =
2073 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex,
2074 ARMCP::CPLSDA, PCAdj);
2075 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2076 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2078 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2079 MachinePointerInfo::getConstantPool(),
2082 if (RelocM == Reloc::PIC_) {
2083 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2084 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2091 static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
2092 const ARMSubtarget *Subtarget) {
2093 DebugLoc dl = Op.getDebugLoc();
2094 if (!Subtarget->hasDataBarrier()) {
2095 // Some ARMv6 CPUs can support data barriers with an mcr instruction.
2096 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get here.
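// The MEMBARRIER_MCR node built below is later emitted as the CP15 barrier
// operation, roughly "mcr p15, 0, <Rd>, c7, c10, 5" (assumed encoding, for
// illustration only).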
2098 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
2099 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
2100 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2101 DAG.getConstant(0, MVT::i32));
2104 SDValue Op5 = Op.getOperand(5);
2105 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
2106 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2107 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2108 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
2110 ARM_MB::MemBOpt DMBOpt;
2111 if (isDeviceBarrier)
2112 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
2114 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
2115 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2116 DAG.getConstant(DMBOpt, MVT::i32));
2119 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2120 const ARMSubtarget *Subtarget) {
2121 // ARM pre-v5TE and Thumb1 do not have preload instructions.
2122 if (!(Subtarget->isThumb2() ||
2123 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2124 // Just preserve the chain.
2125 return Op.getOperand(0);
2127 DebugLoc dl = Op.getDebugLoc();
2128 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2130 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
2131 // ARMv7 with MP extension has PLDW.
2132 return Op.getOperand(0);
2134 if (Subtarget->isThumb())
2136 isRead = ~isRead & 1;
2137 unsigned isData = Subtarget->isThumb() ? 0 : 1;
2139 // Currently there is no intrinsic that matches pli.
2140 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
2141 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
2142 DAG.getConstant(isData, MVT::i32));
2145 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
2146 MachineFunction &MF = DAG.getMachineFunction();
2147 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
2149 // vastart just stores the address of the VarArgsFrameIndex slot into the
2150 // memory location argument.
2151 DebugLoc dl = Op.getDebugLoc();
2152 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2153 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2154 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2155 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2156 MachinePointerInfo(SV), false, false, 0);
2160 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2161 SDValue &Root, SelectionDAG &DAG,
2162 DebugLoc dl) const {
2163 MachineFunction &MF = DAG.getMachineFunction();
2164 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2166 TargetRegisterClass *RC;
2167 if (AFI->isThumb1OnlyFunction())
2168 RC = ARM::tGPRRegisterClass;
2170 RC = ARM::GPRRegisterClass;
2172 // Transform the arguments stored in physical registers into virtual ones.
2173 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2174 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2177 if (NextVA.isMemLoc()) {
2178 MachineFrameInfo *MFI = MF.getFrameInfo();
2179 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2181 // Create load node to retrieve arguments from the stack.
2182 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2183 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
2184 MachinePointerInfo::getFixedStack(FI),
2187 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2188 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2191 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
2195 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg>
2199 DebugLoc dl, SelectionDAG &DAG,
2200 SmallVectorImpl<SDValue> &InVals)
2203 MachineFunction &MF = DAG.getMachineFunction();
2204 MachineFrameInfo *MFI = MF.getFrameInfo();
2206 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2208 // Assign locations to all of the incoming arguments.
2209 SmallVector<CCValAssign, 16> ArgLocs;
2210 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
2212 CCInfo.AnalyzeFormalArguments(Ins,
2213 CCAssignFnForNode(CallConv, /* Return*/ false,
2216 SmallVector<SDValue, 16> ArgValues;
2218 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2219 CCValAssign &VA = ArgLocs[i];
2221 // Arguments stored in registers.
2222 if (VA.isRegLoc()) {
2223 EVT RegVT = VA.getLocVT();
2226 if (VA.needsCustom()) {
2227 // f64 and vector types are split up into multiple registers or
2228 // combinations of registers and stack slots.
2229 if (VA.getLocVT() == MVT::v2f64) {
2230 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
2232 VA = ArgLocs[++i]; // skip ahead to next loc
2234 if (VA.isMemLoc()) {
2235 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
2236 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2237 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
2238 MachinePointerInfo::getFixedStack(FI),
2241 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
2244 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2245 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
2246 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
2247 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
2248 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
2250 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
2253 TargetRegisterClass *RC;
2255 if (RegVT == MVT::f32)
2256 RC = ARM::SPRRegisterClass;
2257 else if (RegVT == MVT::f64)
2258 RC = ARM::DPRRegisterClass;
2259 else if (RegVT == MVT::v2f64)
2260 RC = ARM::QPRRegisterClass;
2261 else if (RegVT == MVT::i32)
2262 RC = (AFI->isThumb1OnlyFunction() ?
2263 ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
2265 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
2267 // Transform the arguments in physical registers into virtual ones.
2268 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2269 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2272 // If this is an 8 or 16-bit value, it is really passed promoted
2273 // to 32 bits. Insert an assert[sz]ext to capture this, then
2274 // truncate to the right size.
2275 switch (VA.getLocInfo()) {
2276 default: llvm_unreachable("Unknown loc info!");
2277 case CCValAssign::Full: break;
2278 case CCValAssign::BCvt:
2279 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2281 case CCValAssign::SExt:
2282 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2283 DAG.getValueType(VA.getValVT()));
2284 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2286 case CCValAssign::ZExt:
2287 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2288 DAG.getValueType(VA.getValVT()));
2289 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2293 InVals.push_back(ArgValue);
2295 } else { // VA.isRegLoc()
2298 assert(VA.isMemLoc());
2299 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
2301 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
2302 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true);
2304 // Create load nodes to retrieve arguments from the stack.
2305 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2306 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
2307 MachinePointerInfo::getFixedStack(FI),
2314 static const unsigned GPRArgRegs[] = {
2315 ARM::R0, ARM::R1, ARM::R2, ARM::R3
2318 unsigned NumGPRs = CCInfo.getFirstUnallocated
2319 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
2321 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
2322 unsigned VARegSize = (4 - NumGPRs) * 4;
2323 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
2324 unsigned ArgOffset = CCInfo.getNextStackOffset();
2325 if (VARegSaveSize) {
2326 // If this function is vararg, store any remaining integer argument regs
2327 // to their spots on the stack so that they may be loaded by dereferencing
2328 // the result of va_next.
2329 AFI->setVarArgsRegSaveSize(VARegSaveSize);
2330 AFI->setVarArgsFrameIndex(
2331 MFI->CreateFixedObject(VARegSaveSize,
2332 ArgOffset + VARegSaveSize - VARegSize,
2334 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
2337 SmallVector<SDValue, 4> MemOps;
2338 for (; NumGPRs < 4; ++NumGPRs) {
2339 TargetRegisterClass *RC;
2340 if (AFI->isThumb1OnlyFunction())
2341 RC = ARM::tGPRRegisterClass;
2343 RC = ARM::GPRRegisterClass;
2345 unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
2346 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2348 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2349 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
2351 MemOps.push_back(Store);
2352 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2353 DAG.getConstant(4, getPointerTy()));
2355 if (!MemOps.empty())
2356 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2357 &MemOps[0], MemOps.size());
2359 // This will point to the next argument passed via stack.
2360 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
2366 /// isFloatingPointZero - Return true if this is +0.0.
2367 static bool isFloatingPointZero(SDValue Op) {
2368 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
2369 return CFP->getValueAPF().isPosZero();
2370 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
2371 // Maybe this has already been legalized into the constant pool?
2372 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
2373 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
2374 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
2375 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
2376 return CFP->getValueAPF().isPosZero();
2382 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
2383 /// the given operands.
2385 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2386 SDValue &ARMcc, SelectionDAG &DAG,
2387 DebugLoc dl) const {
2388 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
2389 unsigned C = RHSC->getZExtValue();
2390 if (!isLegalICmpImmediate(C)) {
2391 // Constant does not fit, try adjusting it by one?
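// e.g. (x < 0x101) has no legal cmp immediate, but (x <= 0x100) does, so
// SETLT becomes SETLE with the constant decremented (illustrative values;
// legality is decided by isLegalICmpImmediate).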
2396 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
2397 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2398 RHS = DAG.getConstant(C-1, MVT::i32);
2403 if (C != 0 && isLegalICmpImmediate(C-1)) {
2404 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2405 RHS = DAG.getConstant(C-1, MVT::i32);
2410 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
2411 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2412 RHS = DAG.getConstant(C+1, MVT::i32);
2417 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
2418 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2419 RHS = DAG.getConstant(C+1, MVT::i32);
2426 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2427 ARMISD::NodeType CompareType;
2430 CompareType = ARMISD::CMP;
2435 CompareType = ARMISD::CMPZ;
2438 ARMcc = DAG.getConstant(CondCode, MVT::i32);
2439 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
2442 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2444 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
2445 DebugLoc dl) const {
2447 if (!isFloatingPointZero(RHS))
2448 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
2450 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
2451 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
2454 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2455 SDValue Cond = Op.getOperand(0);
2456 SDValue SelectTrue = Op.getOperand(1);
2457 SDValue SelectFalse = Op.getOperand(2);
2458 DebugLoc dl = Op.getDebugLoc();
2462 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
2463 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
2465 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
2466 const ConstantSDNode *CMOVTrue =
2467 dyn_cast<ConstantSDNode>(Cond.getOperand(0));
2468 const ConstantSDNode *CMOVFalse =
2469 dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2471 if (CMOVTrue && CMOVFalse) {
2472 unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
2473 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
2477 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
2479 False = SelectFalse;
2480 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
2485 if (True.getNode() && False.getNode()) {
2486 EVT VT = Cond.getValueType();
2487 SDValue ARMcc = Cond.getOperand(2);
2488 SDValue CCR = Cond.getOperand(3);
2489 SDValue Cmp = Cond.getOperand(4);
2490 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
2495 return DAG.getSelectCC(dl, Cond,
2496 DAG.getConstant(0, Cond.getValueType()),
2497 SelectTrue, SelectFalse, ISD::SETNE);
2500 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2501 EVT VT = Op.getValueType();
2502 SDValue LHS = Op.getOperand(0);
2503 SDValue RHS = Op.getOperand(1);
2504 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2505 SDValue TrueVal = Op.getOperand(2);
2506 SDValue FalseVal = Op.getOperand(3);
2507 DebugLoc dl = Op.getDebugLoc();
2509 if (LHS.getValueType() == MVT::i32) {
2511 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2512 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2513 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp);
2516 ARMCC::CondCodes CondCode, CondCode2;
2517 FPCCToARMCC(CC, CondCode, CondCode2);
2519 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2520 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2521 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2522 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
2524 if (CondCode2 != ARMCC::AL) {
2525 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
2526 // FIXME: Needs another CMP because flag can have but one use.
2527 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
2528 Result = DAG.getNode(ARMISD::CMOV, dl, VT,
2529 Result, TrueVal, ARMcc2, CCR, Cmp2);
2534 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
2535 /// to morph to an integer compare sequence.
2536 static bool canChangeToInt(SDValue Op, bool &SeenZero,
2537 const ARMSubtarget *Subtarget) {
2538 SDNode *N = Op.getNode();
2539 if (!N->hasOneUse())
2540 // Otherwise it requires moving the value from fp to integer registers.
2542 if (!N->getNumValues())
2544 EVT VT = Op.getValueType();
2545 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
2546 // f32 case is generally profitable. f64 case only makes sense when vcmpe +
2547 // vmrs are very slow, e.g. cortex-a8.
2550 if (isFloatingPointZero(Op)) {
2554 return ISD::isNormalLoad(N);
2557 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
2558 if (isFloatingPointZero(Op))
2559 return DAG.getConstant(0, MVT::i32);
2561 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
2562 return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2563 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
2564 Ld->isVolatile(), Ld->isNonTemporal(),
2565 Ld->getAlignment());
2567 llvm_unreachable("Unknown VFP cmp argument!");
2570 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
2571 SDValue &RetVal1, SDValue &RetVal2) {
2572 if (isFloatingPointZero(Op)) {
2573 RetVal1 = DAG.getConstant(0, MVT::i32);
2574 RetVal2 = DAG.getConstant(0, MVT::i32);
2578 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
2579 SDValue Ptr = Ld->getBasePtr();
2580 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2581 Ld->getChain(), Ptr,
2582 Ld->getPointerInfo(),
2583 Ld->isVolatile(), Ld->isNonTemporal(),
2584 Ld->getAlignment());
2586 EVT PtrType = Ptr.getValueType();
2587 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
2588 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
2589 PtrType, Ptr, DAG.getConstant(4, PtrType));
2590 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2591 Ld->getChain(), NewPtr,
2592 Ld->getPointerInfo().getWithOffset(4),
2593 Ld->isVolatile(), Ld->isNonTemporal(),
2598 llvm_unreachable("Unknown VFP cmp argument!");
2601 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
2602 /// f32 and even f64 comparisons to integer ones.
2604 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
2605 SDValue Chain = Op.getOperand(0);
2606 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2607 SDValue LHS = Op.getOperand(2);
2608 SDValue RHS = Op.getOperand(3);
2609 SDValue Dest = Op.getOperand(4);
2610 DebugLoc dl = Op.getDebugLoc();
2612 bool SeenZero = false;
2613 if (canChangeToInt(LHS, SeenZero, Subtarget) &&
2614 canChangeToInt(RHS, SeenZero, Subtarget) &&
2615 // If one of the operands is zero, it's safe to ignore the NaN case since
2616 // we only care about equality comparisons.
2617 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
2618 // If unsafe fp math optimization is enabled and there are no other uses of
2619 // the CMP operands, and the condition code is EQ or NE, we can optimize it
2620 // to an integer comparison.
2621 if (CC == ISD::SETOEQ)
2623 else if (CC == ISD::SETUNE)
2627 if (LHS.getValueType() == MVT::f32) {
2628 LHS = bitcastf32Toi32(LHS, DAG);
2629 RHS = bitcastf32Toi32(RHS, DAG);
2630 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2631 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2632 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2633 Chain, Dest, ARMcc, CCR, Cmp);
2638 expandf64Toi32(LHS, DAG, LHS1, LHS2);
2639 expandf64Toi32(RHS, DAG, RHS1, RHS2);
2640 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2641 ARMcc = DAG.getConstant(CondCode, MVT::i32);
2642 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2643 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
2644 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
2650 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2651 SDValue Chain = Op.getOperand(0);
2652 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2653 SDValue LHS = Op.getOperand(2);
2654 SDValue RHS = Op.getOperand(3);
2655 SDValue Dest = Op.getOperand(4);
2656 DebugLoc dl = Op.getDebugLoc();
2658 if (LHS.getValueType() == MVT::i32) {
2660 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2661 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2662 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2663 Chain, Dest, ARMcc, CCR, Cmp);
2666 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
2669 (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
2670 CC == ISD::SETNE || CC == ISD::SETUNE)) {
2671 SDValue Result = OptimizeVFPBrcond(Op, DAG);
2672 if (Result.getNode())
2676 ARMCC::CondCodes CondCode, CondCode2;
2677 FPCCToARMCC(CC, CondCode, CondCode2);
2679 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2680 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2681 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2682 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2683 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
2684 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2685 if (CondCode2 != ARMCC::AL) {
2686 ARMcc = DAG.getConstant(CondCode2, MVT::i32);
2687 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
2688 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2693 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
2694 SDValue Chain = Op.getOperand(0);
2695 SDValue Table = Op.getOperand(1);
2696 SDValue Index = Op.getOperand(2);
2697 DebugLoc dl = Op.getDebugLoc();
2699 EVT PTy = getPointerTy();
2700 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
2701 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
2702 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
2703 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
2704 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
2705 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
2706 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
2707 if (Subtarget->isThumb2()) {
2708 // Thumb2 uses a two-level jump. That is, it jumps into the jump table
2709 // which does another jump to the destination. This also makes it easier
2710 // to translate it to TBB / TBH later.
2711 // FIXME: This might not work if the function is extremely large.
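// i.e. the indirect branch first jumps into the inline jump table, whose
// entries branch to the real destinations; with small enough offsets this is
// later turned into a TBB/TBH (table branch byte/halfword).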
2712 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
2713 Addr, Op.getOperand(2), JTI, UId);
2715 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
2716 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
2717 MachinePointerInfo::getJumpTable(),
2719 Chain = Addr.getValue(1);
2720 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
2721 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
2723 Addr = DAG.getLoad(PTy, dl, Chain, Addr,
2724 MachinePointerInfo::getJumpTable(), false, false, 0);
2725 Chain = Addr.getValue(1);
2726 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
2730 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
2731 DebugLoc dl = Op.getDebugLoc();
2734 switch (Op.getOpcode()) {
2736 assert(0 && "Invalid opcode!");
2737 case ISD::FP_TO_SINT:
2738 Opc = ARMISD::FTOSI;
2740 case ISD::FP_TO_UINT:
2741 Opc = ARMISD::FTOUI;
2744 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
2745 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
2748 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
2749 EVT VT = Op.getValueType();
2750 DebugLoc dl = Op.getDebugLoc();
2753 switch (Op.getOpcode()) {
2755 assert(0 && "Invalid opcode!");
2756 case ISD::SINT_TO_FP:
2757 Opc = ARMISD::SITOF;
2759 case ISD::UINT_TO_FP:
2760 Opc = ARMISD::UITOF;
2764 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
2765 return DAG.getNode(Opc, dl, VT, Op);
2768 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2769 // Implement fcopysign with a fabs and a conditional fneg.
2770 SDValue Tmp0 = Op.getOperand(0);
2771 SDValue Tmp1 = Op.getOperand(1);
2772 DebugLoc dl = Op.getDebugLoc();
2773 EVT VT = Op.getValueType();
2774 EVT SrcVT = Tmp1.getValueType();
2775 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
2776 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32);
2777 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT);
2778 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl);
2779 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2780 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp);
2783 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
2784 MachineFunction &MF = DAG.getMachineFunction();
2785 MachineFrameInfo *MFI = MF.getFrameInfo();
2786 MFI->setReturnAddressIsTaken(true);
2788 EVT VT = Op.getValueType();
2789 DebugLoc dl = Op.getDebugLoc();
2790 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2792 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
2793 SDValue Offset = DAG.getConstant(4, MVT::i32);
2794 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
2795 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
2796 MachinePointerInfo(), false, false, 0);
2799 // Return LR, which contains the return address. Mark it an implicit live-in.
2800 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
2801 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
2804 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2805 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2806 MFI->setFrameAddressIsTaken(true);
2808 EVT VT = Op.getValueType();
2809 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
2810 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2811 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
2812 ? ARM::R7 : ARM::R11;
2813 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2815 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
2816 MachinePointerInfo(),
2821 /// ExpandBITCAST - If the target supports VFP, this function is called to
2822 /// expand a bit convert where either the source or destination type is i64 to
2823 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
2824 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
2825 /// vectors), since the legalizer won't know what to do with that.
2826 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
2827 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2828 DebugLoc dl = N->getDebugLoc();
2829 SDValue Op = N->getOperand(0);
2831 // This function is only supposed to be called for i64 types, either as the
2832 // source or destination of the bit convert.
2833 EVT SrcVT = Op.getValueType();
2834 EVT DstVT = N->getValueType(0);
2835 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
2836 "ExpandBITCAST called for non-i64 type");
2838 // Turn i64->f64 into VMOVDRR.
2839 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
2840 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
2841 DAG.getConstant(0, MVT::i32));
2842 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
2843 DAG.getConstant(1, MVT::i32));
2844 return DAG.getNode(ISD::BITCAST, dl, DstVT,
2845 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
2848 // Turn f64->i64 into VMOVRRD.
2849 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
2850 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
2851 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
2852 // Merge the pieces into a single i64 value.
2853 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
2859 /// getZeroVector - Returns a vector of specified type with all zero elements.
2860 /// Zero vectors are used to represent vector negation and in those cases
2861 /// will be implemented with the NEON VNEG instruction. However, VNEG does
2862 /// not support i64 elements, so sometimes the zero vectors will need to be
2863 /// explicitly constructed. Regardless, use a canonical VMOV to create the zero vector.
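/// (In practice this selects to a single "vmov.i32 dN, #0" or "vmov.i32 qN, #0";
/// register numbers here are illustrative.)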
2865 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2866 assert(VT.isVector() && "Expected a vector type");
2867 // The canonical modified immediate encoding of a zero vector is....0!
2868 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
2869 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
2870 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
2871 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
2874 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
2875 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
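/// In effect, for a shift amount Amt, the nodes built below compute:
///   Lo = (ShOpLo >> Amt) | (ShOpHi << (32 - Amt))   when Amt < 32
///   Lo = ShOpHi >> (Amt - 32)                        otherwise (via CMOV)
///   Hi = ShOpHi >> Amt
/// where ">>" is arithmetic for SRA_PARTS and logical for SRL_PARTS
/// (a sketch of the dataflow, not a literal formula).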
2876 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
2877 SelectionDAG &DAG) const {
2878 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2879 EVT VT = Op.getValueType();
2880 unsigned VTBits = VT.getSizeInBits();
2881 DebugLoc dl = Op.getDebugLoc();
2882 SDValue ShOpLo = Op.getOperand(0);
2883 SDValue ShOpHi = Op.getOperand(1);
2884 SDValue ShAmt = Op.getOperand(2);
2886 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2888 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2890 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2891 DAG.getConstant(VTBits, MVT::i32), ShAmt);
2892 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2893 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2894 DAG.getConstant(VTBits, MVT::i32));
2895 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2896 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2897 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2899 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2900 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
2902 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2903 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
2906 SDValue Ops[2] = { Lo, Hi };
2907 return DAG.getMergeValues(Ops, 2, dl);
2910 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
2911 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
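/// In effect, for a shift amount Amt, the nodes built below compute:
///   Hi = (ShOpHi << Amt) | (ShOpLo >> (32 - Amt))   when Amt < 32
///   Hi = ShOpLo << (Amt - 32)                        otherwise (via CMOV)
///   Lo = ShOpLo << Amt
/// (a sketch of the dataflow, not a literal formula).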
2912 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
2913 SelectionDAG &DAG) const {
2914 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2915 EVT VT = Op.getValueType();
2916 unsigned VTBits = VT.getSizeInBits();
2917 DebugLoc dl = Op.getDebugLoc();
2918 SDValue ShOpLo = Op.getOperand(0);
2919 SDValue ShOpHi = Op.getOperand(1);
2920 SDValue ShAmt = Op.getOperand(2);
2923 assert(Op.getOpcode() == ISD::SHL_PARTS);
2924 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2925 DAG.getConstant(VTBits, MVT::i32), ShAmt);
2926 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2927 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2928 DAG.getConstant(VTBits, MVT::i32));
2929 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2930 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2932 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2933 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2934 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
2936 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2937 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
2940 SDValue Ops[2] = { Lo, Hi };
2941 return DAG.getMergeValues(Ops, 2, dl);
2944 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
2945 SelectionDAG &DAG) const {
2946 // The rounding mode is in bits 23:22 of the FPSCR.
2947 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
2948 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
2949 // so that the shift and the AND get folded into a bitfield extract.
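// For example, an FPSCR rounding-mode field of 2 (round toward minus infinity
// on ARM) becomes ((2 + 1) & 3) == 3, the FLT_ROUNDS value for
// round-toward-negative-infinity.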
2950 DebugLoc dl = Op.getDebugLoc();
2951 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
2952 DAG.getConstant(Intrinsic::arm_get_fpscr,
2954 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
2955 DAG.getConstant(1U << 22, MVT::i32));
2956 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
2957 DAG.getConstant(22, MVT::i32));
2958 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
2959 DAG.getConstant(3, MVT::i32));
2962 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
2963 const ARMSubtarget *ST) {
2964 EVT VT = N->getValueType(0);
2965 DebugLoc dl = N->getDebugLoc();
2967 if (!ST->hasV6T2Ops())
2970 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
2971 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
2974 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
2975 const ARMSubtarget *ST) {
2976 EVT VT = N->getValueType(0);
2977 DebugLoc dl = N->getDebugLoc();
2982 // Lower vector shifts on NEON to use VSHL.
2983 assert(ST->hasNEON() && "unexpected vector shift");
2985 // Left shifts translate directly to the vshiftu intrinsic.
2986 if (N->getOpcode() == ISD::SHL)
2987 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2988 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
2989 N->getOperand(0), N->getOperand(1));
2991 assert((N->getOpcode() == ISD::SRA ||
2992 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
2994 // NEON uses the same intrinsics for both left and right shifts. For
2995 // right shifts, the shift amounts are negative, so negate the vector of shift amounts.
2997 EVT ShiftVT = N->getOperand(1).getValueType();
2998 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
2999 getZeroVector(ShiftVT, DAG, dl),
3001 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
3002 Intrinsic::arm_neon_vshifts :
3003 Intrinsic::arm_neon_vshiftu);
3004 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3005 DAG.getConstant(vshiftInt, MVT::i32),
3006 N->getOperand(0), NegatedCount);
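// For example, an SRL of a v4i32 by <3,3,3,3> becomes a vshiftu intrinsic call
// with shift counts <-3,-3,-3,-3>.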
3009 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
3010 const ARMSubtarget *ST) {
3011 EVT VT = N->getValueType(0);
3012 DebugLoc dl = N->getDebugLoc();
3014 // We can get here for a node like i32 = ISD::SHL i32, i64
3018 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
3019 "Unknown shift to lower!");
3021 // We only lower SRA and SRL by 1 here; all others use generic lowering.
3022 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
3023 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
3026 // If we are in thumb mode, we don't have RRX.
3027 if (ST->isThumb1Only()) return SDValue();
3029 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
3030 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
3031 DAG.getConstant(0, MVT::i32));
3032 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
3033 DAG.getConstant(1, MVT::i32));
3035 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
3036 // captures the result into a carry flag.
3037 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
3038 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1);
3040 // The low part is an ARMISD::RRX operand, which shifts the carry in.
3041 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
3043 // Merge the pieces into a single i64 value.
3044 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
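// For example, for (srl i64 %x, 1) this produces Hi = %x.hi >> 1 with bit 0 of
// %x.hi captured in the carry, and Lo = RRX(%x.lo) = (carry << 31) | (%x.lo >> 1).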
3047 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
3048 SDValue TmpOp0, TmpOp1;
3049 bool Invert = false;
3053 SDValue Op0 = Op.getOperand(0);
3054 SDValue Op1 = Op.getOperand(1);
3055 SDValue CC = Op.getOperand(2);
3056 EVT VT = Op.getValueType();
3057 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
3058 DebugLoc dl = Op.getDebugLoc();
3060 if (Op.getOperand(1).getValueType().isFloatingPoint()) {
3061 switch (SetCCOpcode) {
3062 default: llvm_unreachable("Illegal FP comparison"); break;
3064 case ISD::SETNE: Invert = true; // Fallthrough
3066 case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
3068 case ISD::SETLT: Swap = true; // Fallthrough
3070 case ISD::SETGT: Opc = ARMISD::VCGT; break;
3072 case ISD::SETLE: Swap = true; // Fallthrough
3074 case ISD::SETGE: Opc = ARMISD::VCGE; break;
3075 case ISD::SETUGE: Swap = true; // Fallthrough
3076 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
3077 case ISD::SETUGT: Swap = true; // Fallthrough
3078 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
3079 case ISD::SETUEQ: Invert = true; // Fallthrough
3081 // Expand this to (OLT | OGT).
3085 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3086 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
3088 case ISD::SETUO: Invert = true; // Fallthrough
3090 // Expand this to (OLT | OGE).
3094 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3095 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
3099 // Integer comparisons.
3100 switch (SetCCOpcode) {
3101 default: llvm_unreachable("Illegal integer comparison"); break;
3102 case ISD::SETNE: Invert = true;
3103 case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
3104 case ISD::SETLT: Swap = true;
3105 case ISD::SETGT: Opc = ARMISD::VCGT; break;
3106 case ISD::SETLE: Swap = true;
3107 case ISD::SETGE: Opc = ARMISD::VCGE; break;
3108 case ISD::SETULT: Swap = true;
3109 case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
3110 case ISD::SETULE: Swap = true;
3111 case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
3114 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
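// For example, (setcc ne (and x, y), 0) becomes (VTST x, y) instead of a VCEQ
// against zero followed by a NOT.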
3115 if (Opc == ARMISD::VCEQ) {
3118 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
3120 else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
3123 // Ignore bitconvert.
3124 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
3125 AndOp = AndOp.getOperand(0);
3127 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
3129 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
3130 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
3137 std::swap(Op0, Op1);
3139 // If one of the operands is a constant vector zero, attempt to fold the
3140 // comparison to a specialized compare-against-zero form.
3142 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
3144 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
3145 if (Opc == ARMISD::VCGE)
3146 Opc = ARMISD::VCLEZ;
3147 else if (Opc == ARMISD::VCGT)
3148 Opc = ARMISD::VCLTZ;
3153 if (SingleOp.getNode()) {
3156 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
3158 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
3160 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
3162 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
3164 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
3166 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3169 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3173 Result = DAG.getNOT(dl, Result, VT);
3178 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
3179 /// valid vector constant for a NEON instruction with a "modified immediate"
3180 /// operand (e.g., VMOV). If so, return the encoded value.
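/// For example, a v4i32 splat of 0x00ff0000 matches the "Value = 0x00nn0000:
/// Op=x, Cmode=010x" case below and is encoded with Imm = 0xff.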
3181 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
3182 unsigned SplatBitSize, SelectionDAG &DAG,
3183 EVT &VT, bool is128Bits, NEONModImmType type) {
3184 unsigned OpCmode, Imm;
3186 // SplatBitSize is set to the smallest size that splats the vector, so a
3187 // zero vector will always have SplatBitSize == 8. However, NEON modified
3188 // immediate instructions other than VMOV do not support the 8-bit encoding
3189 // of a zero vector, and the default encoding of zero is supposed to be the
3194 switch (SplatBitSize) {
3196 if (type != VMOVModImm)
3198 // Any 1-byte value is OK. Op=0, Cmode=1110.
3199 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
3202 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3206 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
3207 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3208 if ((SplatBits & ~0xff) == 0) {
3209 // Value = 0x00nn: Op=x, Cmode=100x.
3214 if ((SplatBits & ~0xff00) == 0) {
3215 // Value = 0xnn00: Op=x, Cmode=101x.
3217 Imm = SplatBits >> 8;
3223 // NEON's 32-bit VMOV supports splat values where:
3224 // * only one byte is nonzero, or
3225 // * the least significant byte is 0xff and the second byte is nonzero, or
3226 // * the least significant 2 bytes are 0xff and the third is nonzero.
3227 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3228 if ((SplatBits & ~0xff) == 0) {
3229 // Value = 0x000000nn: Op=x, Cmode=000x.
3234 if ((SplatBits & ~0xff00) == 0) {
3235 // Value = 0x0000nn00: Op=x, Cmode=001x.
3237 Imm = SplatBits >> 8;
3240 if ((SplatBits & ~0xff0000) == 0) {
3241 // Value = 0x00nn0000: Op=x, Cmode=010x.
3243 Imm = SplatBits >> 16;
3246 if ((SplatBits & ~0xff000000) == 0) {
3247 // Value = 0xnn000000: Op=x, Cmode=011x.
3249 Imm = SplatBits >> 24;
3253 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
3254 if (type == OtherModImm) return SDValue();
3256 if ((SplatBits & ~0xffff) == 0 &&
3257 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
3258 // Value = 0x0000nnff: Op=x, Cmode=1100.
3260 Imm = SplatBits >> 8;
3265 if ((SplatBits & ~0xffffff) == 0 &&
3266 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3267 // Value = 0x00nnffff: Op=x, Cmode=1101.
3269 Imm = SplatBits >> 16;
3270 SplatBits |= 0xffff;
3274 // Note: there are a few 32-bit splat values (specifically: 00ffff00,
3275 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
3276 // VMOV.I32. A (very) minor optimization would be to replicate the value
3277 // and fall through here to test for a valid 64-bit splat. But, then the
3278 // caller would also need to check and handle the change in size.
3282 if (type != VMOVModImm)
3284 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
3285 uint64_t BitMask = 0xff;
3287 unsigned ImmMask = 1;
3289 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3290 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3293 } else if ((SplatBits & BitMask) != 0) {
3299 // Op=1, Cmode=1110.
3302 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3307 llvm_unreachable("unexpected size for isNEONModifiedImm");
3311 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
3312 return DAG.getTargetConstant(EncodedVal, MVT::i32);
3315 static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
3316 bool &ReverseVEXT, unsigned &Imm) {
3317 unsigned NumElts = VT.getVectorNumElements();
3318 ReverseVEXT = false;
3320 // Assume that the first shuffle index is not UNDEF. Fail if it is.
3326 // If this is a VEXT shuffle, the immediate value is the index of the first
3327 // element. The other shuffle indices must be the successive elements after the first one.
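// For example, the v8i8 mask <1,2,3,4,5,6,7,8> is a VEXT with Imm == 1 and no
// operand swap.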
3329 unsigned ExpectedElt = Imm;
3330 for (unsigned i = 1; i < NumElts; ++i) {
3331 // Increment the expected index. If it wraps around, it may still be
3332 // a VEXT but the source vectors must be swapped.
3334 if (ExpectedElt == NumElts * 2) {
3339 if (M[i] < 0) continue; // ignore UNDEF indices
3340 if (ExpectedElt != static_cast<unsigned>(M[i]))
3344 // Adjust the index value if the source operands will be swapped.
3351 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
3352 /// instruction with the specified blocksize. (The order of the elements
3353 /// within each block of the vector is reversed.)
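/// For example, VREV64.8 on a v8i8 corresponds to the mask <7,6,5,4,3,2,1,0>,
/// and VREV32.8 to <3,2,1,0,7,6,5,4>.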
3354 static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
3355 unsigned BlockSize) {
3356 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
3357 "Only possible block sizes for VREV are: 16, 32, 64");
3359 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3363 unsigned NumElts = VT.getVectorNumElements();
3364 unsigned BlockElts = M[0] + 1;
3365 // If the first shuffle index is UNDEF, be optimistic.
3367 BlockElts = BlockSize / EltSz;
3369 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
3372 for (unsigned i = 0; i < NumElts; ++i) {
3373 if (M[i] < 0) continue; // ignore UNDEF indices
3374 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
3381 static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
3382 unsigned &WhichResult) {
3383 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3387 unsigned NumElts = VT.getVectorNumElements();
3388 WhichResult = (M[0] == 0 ? 0 : 1);
3389 for (unsigned i = 0; i < NumElts; i += 2) {
3390 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
3391 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
3397 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
3398 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
3399 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
3400 static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
3401 unsigned &WhichResult) {
3402 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3406 unsigned NumElts = VT.getVectorNumElements();
3407 WhichResult = (M[0] == 0 ? 0 : 1);
3408 for (unsigned i = 0; i < NumElts; i += 2) {
3409 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
3410 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
3416 static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
3417 unsigned &WhichResult) {
3418 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3422 unsigned NumElts = VT.getVectorNumElements();
3423 WhichResult = (M[0] == 0 ? 0 : 1);
3424 for (unsigned i = 0; i != NumElts; ++i) {
3425 if (M[i] < 0) continue; // ignore UNDEF indices
3426 if ((unsigned) M[i] != 2 * i + WhichResult)
3430 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3431 if (VT.is64BitVector() && EltSz == 32)
3437 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
3438 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
3439 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
3440 static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
3441 unsigned &WhichResult) {
3442 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3446 unsigned Half = VT.getVectorNumElements() / 2;
3447 WhichResult = (M[0] == 0 ? 0 : 1);
3448 for (unsigned j = 0; j != 2; ++j) {
3449 unsigned Idx = WhichResult;
3450 for (unsigned i = 0; i != Half; ++i) {
3451 int MIdx = M[i + j * Half];
3452 if (MIdx >= 0 && (unsigned) MIdx != Idx)
3458 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3459 if (VT.is64BitVector() && EltSz == 32)
3465 static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
3466 unsigned &WhichResult) {
3467 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3471 unsigned NumElts = VT.getVectorNumElements();
3472 WhichResult = (M[0] == 0 ? 0 : 1);
3473 unsigned Idx = WhichResult * NumElts / 2;
3474 for (unsigned i = 0; i != NumElts; i += 2) {
3475 if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
3476 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
3481 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3482 if (VT.is64BitVector() && EltSz == 32)
3488 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
3489 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
3490 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
3491 static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
3492 unsigned &WhichResult) {
3493 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3497 unsigned NumElts = VT.getVectorNumElements();
3498 WhichResult = (M[0] == 0 ? 0 : 1);
3499 unsigned Idx = WhichResult * NumElts / 2;
3500 for (unsigned i = 0; i != NumElts; i += 2) {
3501 if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
3502 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
3507 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3508 if (VT.is64BitVector() && EltSz == 32)
3514 // If N is an integer constant that can be moved into a register in one
3515 // instruction, return an SDValue of such a constant (will become a MOV
3516 // instruction). Otherwise return null.
3517 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
3518 const ARMSubtarget *ST, DebugLoc dl) {
3520 if (!isa<ConstantSDNode>(N))
3522 Val = cast<ConstantSDNode>(N)->getZExtValue();
3524 if (ST->isThumb1Only()) {
3525 if (Val <= 255 || ~Val <= 255)
3526 return DAG.getConstant(Val, MVT::i32);
3528 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
3529 return DAG.getConstant(Val, MVT::i32);
3534 // If this is a case we can't handle, return null and let the default
3535 // expansion code take care of it.
3536 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
3537 const ARMSubtarget *ST) const {
3538 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
3539 DebugLoc dl = Op.getDebugLoc();
3540 EVT VT = Op.getValueType();
3542 APInt SplatBits, SplatUndef;
3543 unsigned SplatBitSize;
3545 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
3546 if (SplatBitSize <= 64) {
3547 // Check if an immediate VMOV works.
3549 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
3550 SplatUndef.getZExtValue(), SplatBitSize,
3551 DAG, VmovVT, VT.is128BitVector(),
3553 if (Val.getNode()) {
3554 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
3555 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3558 // Try an immediate VMVN.
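// For example, a v4i32 splat of 0xffffff00 has no VMOV.I32 encoding, but its
// bitwise complement 0x000000ff does, so the constant can be materialized with
// a VMVN instead.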
3559 uint64_t NegatedImm = (SplatBits.getZExtValue() ^
3560 ((1LL << SplatBitSize) - 1));
3561 Val = isNEONModifiedImm(NegatedImm,
3562 SplatUndef.getZExtValue(), SplatBitSize,
3563 DAG, VmovVT, VT.is128BitVector(),
3565 if (Val.getNode()) {
3566 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
3567 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3572 // Scan through the operands to see if only one value is used.
3573 unsigned NumElts = VT.getVectorNumElements();
3574 bool isOnlyLowElement = true;
3575 bool usesOnlyOneValue = true;
3576 bool isConstant = true;
3578 for (unsigned i = 0; i < NumElts; ++i) {
3579 SDValue V = Op.getOperand(i);
3580 if (V.getOpcode() == ISD::UNDEF)
3583 isOnlyLowElement = false;
3584 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
3587 if (!Value.getNode())
3589 else if (V != Value)
3590 usesOnlyOneValue = false;
3593 if (!Value.getNode())
3594 return DAG.getUNDEF(VT);
3596 if (isOnlyLowElement)
3597 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
3599 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
3601 // Use VDUP for non-constant splats. For f32 constant splats, reduce to
3602 // i32 and try again.
3603 if (usesOnlyOneValue && EltSize <= 32) {
3605 return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
3606 if (VT.getVectorElementType().isFloatingPoint()) {
3607 SmallVector<SDValue, 8> Ops;
3608 for (unsigned i = 0; i < NumElts; ++i)
3609 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
3611 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
3612 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
3613 Val = LowerBUILD_VECTOR(Val, DAG, ST);
3615 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
3617 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
3619 return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
3622 // If all elements are constants and the case above didn't get hit, fall back
3623 // to the default expansion, which will generate a load from the constant pool.
3628 // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
3630 SDValue shuffle = ReconstructShuffle(Op, DAG);
3631 if (shuffle != SDValue())
3635 // Vectors with 32- or 64-bit elements can be built by directly assigning
3636 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
3637 // will be legalized.
3638 if (EltSize >= 32) {
3639 // Do the expansion with floating-point types, since that is what the VFP
3640 // registers are defined to use, and since i64 is not legal.
3641 EVT EltVT = EVT::getFloatingPointVT(EltSize);
3642 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
3643 SmallVector<SDValue, 8> Ops;
3644 for (unsigned i = 0; i < NumElts; ++i)
3645 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
3646 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
3647 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
3653 // Gather data to see if the operation can be modelled as a
3654 // shuffle in combination with VEXTs.
3655 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const {
3656 DebugLoc dl = Op.getDebugLoc();
3657 EVT VT = Op.getValueType();
3658 unsigned NumElts = VT.getVectorNumElements();
3660 SmallVector<SDValue, 2> SourceVecs;
3661 SmallVector<unsigned, 2> MinElts;
3662 SmallVector<unsigned, 2> MaxElts;
3664 for (unsigned i = 0; i < NumElts; ++i) {
3665 SDValue V = Op.getOperand(i);
3666 if (V.getOpcode() == ISD::UNDEF)
3668 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
3669 // A shuffle can only come from building a vector from various
3670 // elements of other vectors.
3674 // Record this extraction against the appropriate vector if possible...
3675 SDValue SourceVec = V.getOperand(0);
3676 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
3677 bool FoundSource = false;
3678 for (unsigned j = 0; j < SourceVecs.size(); ++j) {
3679 if (SourceVecs[j] == SourceVec) {
3680 if (MinElts[j] > EltNo)
3682 if (MaxElts[j] < EltNo)
3689 // Or record a new source if not...
3691 SourceVecs.push_back(SourceVec);
3692 MinElts.push_back(EltNo);
3693 MaxElts.push_back(EltNo);
3697 // Currently only do something sane when at most two source vectors are involved.
3699 if (SourceVecs.size() > 2)
3702 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
3703 int VEXTOffsets[2] = {0, 0};
3705 // This loop extracts the usage patterns of the source vectors
3706 // and prepares appropriate SDValues for a shuffle if possible.
3707 for (unsigned i = 0; i < SourceVecs.size(); ++i) {
3708 if (SourceVecs[i].getValueType() == VT) {
3709 // No VEXT necessary
3710 ShuffleSrcs[i] = SourceVecs[i];
3713 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
3714 // It probably isn't worth padding out a smaller vector just to
3715 // break it down again in a shuffle.
3719 // Since only 64-bit and 128-bit vectors are legal on ARM and
3720 // we've eliminated the other cases...
3721 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
3722 "unexpected vector sizes in ReconstructShuffle");
3724 if (MaxElts[i] - MinElts[i] >= NumElts) {
3725 // Span too large for a VEXT to cope
3729 if (MinElts[i] >= NumElts) {
3730 // The extraction can just take the second half
3731 VEXTOffsets[i] = NumElts;
3732 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SourceVecs[i],
3733 DAG.getIntPtrConstant(NumElts));
3734 } else if (MaxElts[i] < NumElts) {
3735 // The extraction can just take the first half
3737 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SourceVecs[i],
3738 DAG.getIntPtrConstant(0));
3740 // An actual VEXT is needed
3741 VEXTOffsets[i] = MinElts[i];
3742 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SourceVecs[i],
3743 DAG.getIntPtrConstant(0));
3744 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SourceVecs[i],
3745 DAG.getIntPtrConstant(NumElts));
3746 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
3747 DAG.getConstant(VEXTOffsets[i], MVT::i32));
3751 SmallVector<int, 8> Mask;
3753 for (unsigned i = 0; i < NumElts; ++i) {
3754 SDValue Entry = Op.getOperand(i);
3755 if (Entry.getOpcode() == ISD::UNDEF) {
3760 SDValue ExtractVec = Entry.getOperand(0);
3761 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i).getOperand(1))->getSExtValue();
3762 if (ExtractVec == SourceVecs[0]) {
3763 Mask.push_back(ExtractElt - VEXTOffsets[0]);
3765 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
3769 // Final check before we try to produce nonsense...
3770 if (isShuffleMaskLegal(Mask, VT))
3771 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], &Mask[0]);
3776 /// isShuffleMaskLegal - Targets can use this to indicate that they only
3777 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
3778 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
3779 /// are assumed to be legal.
3781 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
3783 if (VT.getVectorNumElements() == 4 &&
3784 (VT.is128BitVector() || VT.is64BitVector())) {
3785 unsigned PFIndexes[4];
3786 for (unsigned i = 0; i != 4; ++i) {
3790 PFIndexes[i] = M[i];
3793 // Compute the index in the perfect shuffle table.
3794 unsigned PFTableIndex =
3795 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
3796 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
3797 unsigned Cost = (PFEntry >> 30);
3804 unsigned Imm, WhichResult;
3806 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
3807 return (EltSize >= 32 ||
3808 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
3809 isVREVMask(M, VT, 64) ||
3810 isVREVMask(M, VT, 32) ||
3811 isVREVMask(M, VT, 16) ||
3812 isVEXTMask(M, VT, ReverseVEXT, Imm) ||
3813 isVTRNMask(M, VT, WhichResult) ||
3814 isVUZPMask(M, VT, WhichResult) ||
3815 isVZIPMask(M, VT, WhichResult) ||
3816 isVTRN_v_undef_Mask(M, VT, WhichResult) ||
3817 isVUZP_v_undef_Mask(M, VT, WhichResult) ||
3818 isVZIP_v_undef_Mask(M, VT, WhichResult));
3821 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
3822 /// the specified operations to build the shuffle.
3823 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
3824 SDValue RHS, SelectionDAG &DAG,
3826 unsigned OpNum = (PFEntry >> 26) & 0x0F;
3827 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
3828 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
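// Each perfect-shuffle table entry packs a 2-bit cost in bits 31:30, a 4-bit
// opcode in bits 29:26, and two 13-bit operand IDs in bits 25:13 and 12:0.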
3831 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
3840 OP_VUZPL, // VUZP, left result
3841 OP_VUZPR, // VUZP, right result
3842 OP_VZIPL, // VZIP, left result
3843 OP_VZIPR, // VZIP, right result
3844 OP_VTRNL, // VTRN, left result
3845 OP_VTRNR // VTRN, right result
3848 if (OpNum == OP_COPY) {
3849 if (LHSID == (1*9+2)*9+3) return LHS;
3850 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
3854 SDValue OpLHS, OpRHS;
3855 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
3856 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
3857 EVT VT = OpLHS.getValueType();
3860 default: llvm_unreachable("Unknown shuffle opcode!");
3862 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
3867 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
3868 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
3872 return DAG.getNode(ARMISD::VEXT, dl, VT,
3874 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
3877 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
3878 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
3881 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
3882 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
3885 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
3886 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
3890 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
3891 SDValue V1 = Op.getOperand(0);
3892 SDValue V2 = Op.getOperand(1);
3893 DebugLoc dl = Op.getDebugLoc();
3894 EVT VT = Op.getValueType();
3895 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
3896 SmallVector<int, 8> ShuffleMask;
3898 // Convert shuffles that are directly supported on NEON to target-specific
3899 // DAG nodes, instead of keeping them as shuffles and matching them again
3900 // during code selection. This is more efficient and avoids the possibility
3901 // of inconsistencies between legalization and selection.
3902 // FIXME: floating-point vectors should be canonicalized to integer vectors
3903 // of the same type so that they get CSEd properly.
3904 SVN->getMask(ShuffleMask);
3906 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
3907 if (EltSize <= 32) {
3908 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
3909 int Lane = SVN->getSplatIndex();
3910 // If this is undef splat, generate it via "just" vdup, if possible.
3911 if (Lane == -1) Lane = 0;
3913 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
3914 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
3916 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
3917 DAG.getConstant(Lane, MVT::i32));
3922 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
3925 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
3926 DAG.getConstant(Imm, MVT::i32));
3929 if (isVREVMask(ShuffleMask, VT, 64))
3930 return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
3931 if (isVREVMask(ShuffleMask, VT, 32))
3932 return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
3933 if (isVREVMask(ShuffleMask, VT, 16))
3934 return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
3936 // Check for Neon shuffles that modify both input vectors in place.
3937 // If both results are used, i.e., if there are two shuffles with the same
3938 // source operands and with masks corresponding to both results of one of
3939 // these operations, DAG memoization will ensure that a single node is
3940 // used for both shuffles.
3941 unsigned WhichResult;
3942 if (isVTRNMask(ShuffleMask, VT, WhichResult))
3943 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
3944 V1, V2).getValue(WhichResult);
3945 if (isVUZPMask(ShuffleMask, VT, WhichResult))
3946 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
3947 V1, V2).getValue(WhichResult);
3948 if (isVZIPMask(ShuffleMask, VT, WhichResult))
3949 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
3950 V1, V2).getValue(WhichResult);
3952 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
3953 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
3954 V1, V1).getValue(WhichResult);
3955 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
3956 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
3957 V1, V1).getValue(WhichResult);
3958 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
3959 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
3960 V1, V1).getValue(WhichResult);
3963 // If the shuffle is not directly supported and it has 4 elements, use
3964 // the PerfectShuffle-generated table to synthesize it from other shuffles.
3965 unsigned NumElts = VT.getVectorNumElements();
3967 unsigned PFIndexes[4];
3968 for (unsigned i = 0; i != 4; ++i) {
3969 if (ShuffleMask[i] < 0)
3972 PFIndexes[i] = ShuffleMask[i];
3975 // Compute the index in the perfect shuffle table.
3976 unsigned PFTableIndex =
3977 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
3978 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
3979 unsigned Cost = (PFEntry >> 30);
3982 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
3985 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
3986 if (EltSize >= 32) {
3987 // Do the expansion with floating-point types, since that is what the VFP
3988 // registers are defined to use, and since i64 is not legal.
3989 EVT EltVT = EVT::getFloatingPointVT(EltSize);
3990 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
3991 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
3992 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
3993 SmallVector<SDValue, 8> Ops;
3994 for (unsigned i = 0; i < NumElts; ++i) {
3995 if (ShuffleMask[i] < 0)
3996 Ops.push_back(DAG.getUNDEF(EltVT));
3998 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
3999 ShuffleMask[i] < (int)NumElts ? V1 : V2,
4000 DAG.getConstant(ShuffleMask[i] & (NumElts-1),
4003 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
4004 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4010 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
4011 // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
4012 SDValue Lane = Op.getOperand(1);
4013 if (!isa<ConstantSDNode>(Lane))
4016 SDValue Vec = Op.getOperand(0);
4017 if (Op.getValueType() == MVT::i32 &&
4018 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
4019 DebugLoc dl = Op.getDebugLoc();
4020 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
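// For example, (i32 extract_vector_elt v8i16:%v, 3) becomes VGETLANEu(%v, 3),
// i.e. a VMOV.U16 that zero-extends lane 3 into the i32 result.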
4026 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
4027 // The only time a CONCAT_VECTORS operation can have legal types is when
4028 // two 64-bit vectors are concatenated to a 128-bit vector.
4029 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
4030 "unexpected CONCAT_VECTORS");
4031 DebugLoc dl = Op.getDebugLoc();
4032 SDValue Val = DAG.getUNDEF(MVT::v2f64);
4033 SDValue Op0 = Op.getOperand(0);
4034 SDValue Op1 = Op.getOperand(1);
4035 if (Op0.getOpcode() != ISD::UNDEF)
4036 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
4037 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
4038 DAG.getIntPtrConstant(0));
4039 if (Op1.getOpcode() != ISD::UNDEF)
4040 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
4041 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
4042 DAG.getIntPtrConstant(1));
4043 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
4046 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
4047 /// element has been zero/sign-extended, depending on the isSigned parameter,
4048 /// from an integer type half its size.
4049 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
4051 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
4052 EVT VT = N->getValueType(0);
4053 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
4054 SDNode *BVN = N->getOperand(0).getNode();
4055 if (BVN->getValueType(0) != MVT::v4i32 ||
4056 BVN->getOpcode() != ISD::BUILD_VECTOR)
4058 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
4059 unsigned HiElt = 1 - LoElt;
4060 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
4061 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
4062 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
4063 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
4064 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
4067 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
4068 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
4071 if (Hi0->isNullValue() && Hi1->isNullValue())
4077 if (N->getOpcode() != ISD::BUILD_VECTOR)
4080 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
4081 SDNode *Elt = N->getOperand(i).getNode();
4082 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
4083 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4084 unsigned HalfSize = EltSize / 2;
4086 int64_t SExtVal = C->getSExtValue();
4087 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize))
4090 if ((C->getZExtValue() >> HalfSize) != 0)
4101 /// isSignExtended - Check if a node is a vector value that is sign-extended
4102 /// or a constant BUILD_VECTOR with sign-extended elements.
4103 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
4104 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
4106 if (isExtendedBUILD_VECTOR(N, DAG, true))
4111 /// isZeroExtended - Check if a node is a vector value that is zero-extended
4112 /// or a constant BUILD_VECTOR with zero-extended elements.
4113 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
4114 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
4116 if (isExtendedBUILD_VECTOR(N, DAG, false))
4121 /// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending
4122 /// load, or BUILD_VECTOR with extended elements, return the unextended value.
4123 static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) {
4124 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
4125 return N->getOperand(0);
4126 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
4127 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(),
4128 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
4129 LD->isNonTemporal(), LD->getAlignment());
4130 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
4131 // have been legalized as a BITCAST from v4i32.
4132 if (N->getOpcode() == ISD::BITCAST) {
4133 SDNode *BVN = N->getOperand(0).getNode();
4134 assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
4135 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
4136 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
4137 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32,
4138 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
4140 // Construct a new BUILD_VECTOR with elements truncated to half the size.
4141 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
4142 EVT VT = N->getValueType(0);
4143 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
4144 unsigned NumElts = VT.getVectorNumElements();
4145 MVT TruncVT = MVT::getIntegerVT(EltSize);
4146 SmallVector<SDValue, 8> Ops;
4147 for (unsigned i = 0; i != NumElts; ++i) {
4148 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
4149 const APInt &CInt = C->getAPIntValue();
4150 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT));
4152 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
4153 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
4156 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
4157 // Multiplications are only custom-lowered for 128-bit vectors so that
4158 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
4159 EVT VT = Op.getValueType();
4160 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL");
4161 SDNode *N0 = Op.getOperand(0).getNode();
4162 SDNode *N1 = Op.getOperand(1).getNode();
4163 unsigned NewOpc = 0;
4164 if (isSignExtended(N0, DAG) && isSignExtended(N1, DAG))
4165 NewOpc = ARMISD::VMULLs;
4166 else if (isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG))
4167 NewOpc = ARMISD::VMULLu;
4168 else if (VT == MVT::v2i64)
4169 // Fall through to expand this. It is not legal.
4172 // Other vector multiplications are legal.
4175 // Legalize to a VMULL instruction.
4176 DebugLoc DL = Op.getDebugLoc();
4177 SDValue Op0 = SkipExtension(N0, DAG);
4178 SDValue Op1 = SkipExtension(N1, DAG);
4180 assert(Op0.getValueType().is64BitVector() &&
4181 Op1.getValueType().is64BitVector() &&
4182 "unexpected types for extended operands to VMULL");
4183 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
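// For example, (mul (sext v4i16 to v4i32), (sext v4i16 to v4i32)) becomes
// (VMULLs v4i16, v4i16), i.e. a single VMULL.S16 producing the v4i32 result.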
4186 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4187 switch (Op.getOpcode()) {
4188 default: llvm_unreachable("Don't know how to custom lower this!");
4189 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
4190 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
4191 case ISD::GlobalAddress:
4192 return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
4193 LowerGlobalAddressELF(Op, DAG);
4194 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
4195 case ISD::SELECT: return LowerSELECT(Op, DAG);
4196 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
4197 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
4198 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
4199 case ISD::VASTART: return LowerVASTART(Op, DAG);
4200 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget);
4201 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
4202 case ISD::SINT_TO_FP:
4203 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
4204 case ISD::FP_TO_SINT:
4205 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
4206 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
4207 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
4208 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
4209 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
4210 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
4211 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
4212 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
4213 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
4215 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
4218 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
4219 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
4220 case ISD::SRL_PARTS:
4221 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
4222 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
4223 case ISD::VSETCC: return LowerVSETCC(Op, DAG);
4224 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
4225 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
4226 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
4227 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
4228 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
4229 case ISD::MUL: return LowerMUL(Op, DAG);
4234 /// ReplaceNodeResults - Replace the results of a node with an illegal result
4235 /// type with new values built out of custom code.
4236 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
4237 SmallVectorImpl<SDValue>&Results,
4238 SelectionDAG &DAG) const {
4240 switch (N->getOpcode()) {
4242 llvm_unreachable("Don't know how to custom expand this!");
4245 Res = ExpandBITCAST(N, DAG);
4249 Res = Expand64BitShift(N, DAG, Subtarget);
4253 Results.push_back(Res);
4256 //===----------------------------------------------------------------------===//
4257 // ARM Scheduler Hooks
4258 //===----------------------------------------------------------------------===//
4261 ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
4262 MachineBasicBlock *BB,
4263 unsigned Size) const {
4264 unsigned dest = MI->getOperand(0).getReg();
4265 unsigned ptr = MI->getOperand(1).getReg();
4266 unsigned oldval = MI->getOperand(2).getReg();
4267 unsigned newval = MI->getOperand(3).getReg();
4268 unsigned scratch = BB->getParent()->getRegInfo()
4269 .createVirtualRegister(ARM::GPRRegisterClass);
4270 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4271 DebugLoc dl = MI->getDebugLoc();
4272 bool isThumb2 = Subtarget->isThumb2();
4274 unsigned ldrOpc, strOpc;
4276 default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
4278 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
4279 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
4282 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
4283 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
4286 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
4287 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
4291 MachineFunction *MF = BB->getParent();
4292 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4293 MachineFunction::iterator It = BB;
4294 ++It; // insert the new blocks after the current block
4296 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
4297 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
4298 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4299 MF->insert(It, loop1MBB);
4300 MF->insert(It, loop2MBB);
4301 MF->insert(It, exitMBB);
4303 // Transfer the remainder of BB and its successor edges to exitMBB.
4304 exitMBB->splice(exitMBB->begin(), BB,
4305 llvm::next(MachineBasicBlock::iterator(MI)),
4307 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4311 // fallthrough --> loop1MBB
4312 BB->addSuccessor(loop1MBB);
4315 // ldrex dest, [ptr]
4319 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
4320 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
4321 .addReg(dest).addReg(oldval));
4322 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4323 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4324 BB->addSuccessor(loop2MBB);
4325 BB->addSuccessor(exitMBB);
4328 // strex scratch, newval, [ptr]
4332 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
4334 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
4335 .addReg(scratch).addImm(0));
4336 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4337 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4338 BB->addSuccessor(loop1MBB);
4339 BB->addSuccessor(exitMBB);
4345 MI->eraseFromParent(); // The instruction is gone now.
4351 ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
4352 unsigned Size, unsigned BinOpcode) const {
4353 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
4354 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4356 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4357 MachineFunction *MF = BB->getParent();
4358 MachineFunction::iterator It = BB;
4361 unsigned dest = MI->getOperand(0).getReg();
4362 unsigned ptr = MI->getOperand(1).getReg();
4363 unsigned incr = MI->getOperand(2).getReg();
4364 DebugLoc dl = MI->getDebugLoc();
4366 bool isThumb2 = Subtarget->isThumb2();
4367 unsigned ldrOpc, strOpc;
4369 default: llvm_unreachable("unsupported size for AtomicBinary!");
4371 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
4372 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
4375 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
4376 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
4379 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
4380 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
4384 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4385 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4386 MF->insert(It, loopMBB);
4387 MF->insert(It, exitMBB);
4389 // Transfer the remainder of BB and its successor edges to exitMBB.
4390 exitMBB->splice(exitMBB->begin(), BB,
4391 llvm::next(MachineBasicBlock::iterator(MI)),
4393 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4395 MachineRegisterInfo &RegInfo = MF->getRegInfo();
4396 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
4397 unsigned scratch2 = (!BinOpcode) ? incr :
4398 RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
4402 // fallthrough --> loopMBB
4403 BB->addSuccessor(loopMBB);
4407 // <binop> scratch2, dest, incr
4408 // strex scratch, scratch2, ptr
4411 // fallthrough --> exitMBB
4413 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
4415 // operand order needs to go the other way for NAND
4416 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
4417 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4418 addReg(incr).addReg(dest)).addReg(0);
4420 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4421 addReg(dest).addReg(incr)).addReg(0);
4424 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2)
4426 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
4427 .addReg(scratch).addImm(0));
4428 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4429 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4431 BB->addSuccessor(loopMBB);
4432 BB->addSuccessor(exitMBB);
4438 MI->eraseFromParent(); // The instruction is gone now.
4444 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
4445 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
4446 E = MBB->succ_end(); I != E; ++I)
4449 llvm_unreachable("Expecting a BB with two successors!");
4453 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
4454 MachineBasicBlock *BB) const {
4455 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4456 DebugLoc dl = MI->getDebugLoc();
4457 bool isThumb2 = Subtarget->isThumb2();
4458 switch (MI->getOpcode()) {
4461 llvm_unreachable("Unexpected instr type to insert");
4463 case ARM::ATOMIC_LOAD_ADD_I8:
4464 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
4465 case ARM::ATOMIC_LOAD_ADD_I16:
4466 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
4467 case ARM::ATOMIC_LOAD_ADD_I32:
4468 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
4470 case ARM::ATOMIC_LOAD_AND_I8:
4471 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
4472 case ARM::ATOMIC_LOAD_AND_I16:
4473 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
4474 case ARM::ATOMIC_LOAD_AND_I32:
4475 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
4477 case ARM::ATOMIC_LOAD_OR_I8:
4478 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
4479 case ARM::ATOMIC_LOAD_OR_I16:
4480 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
4481 case ARM::ATOMIC_LOAD_OR_I32:
4482 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
4484 case ARM::ATOMIC_LOAD_XOR_I8:
4485 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
4486 case ARM::ATOMIC_LOAD_XOR_I16:
4487 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
4488 case ARM::ATOMIC_LOAD_XOR_I32:
4489 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
4491 case ARM::ATOMIC_LOAD_NAND_I8:
4492 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
4493 case ARM::ATOMIC_LOAD_NAND_I16:
4494 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
4495 case ARM::ATOMIC_LOAD_NAND_I32:
4496 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
4498 case ARM::ATOMIC_LOAD_SUB_I8:
4499 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
4500 case ARM::ATOMIC_LOAD_SUB_I16:
4501 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
4502 case ARM::ATOMIC_LOAD_SUB_I32:
4503 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
4505 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0);
4506 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
4507 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
4509 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1);
4510 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
4511 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
4513 case ARM::tMOVCCr_pseudo: {
4514 // To "insert" a SELECT_CC instruction, we actually have to insert the
4515 // diamond control-flow pattern. The incoming instruction knows the
4516 // destination vreg to set, the condition code register to branch on, the
4517 // true/false values to select between, and a branch opcode to use.
4518 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4519 MachineFunction::iterator It = BB;
4525 // cmpTY ccX, r1, r2
4527 // fallthrough --> copy0MBB
4528 MachineBasicBlock *thisMBB = BB;
4529 MachineFunction *F = BB->getParent();
4530 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4531 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4532 F->insert(It, copy0MBB);
4533 F->insert(It, sinkMBB);
4535 // Transfer the remainder of BB and its successor edges to sinkMBB.
4536 sinkMBB->splice(sinkMBB->begin(), BB,
4537 llvm::next(MachineBasicBlock::iterator(MI)),
4539 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
4541 BB->addSuccessor(copy0MBB);
4542 BB->addSuccessor(sinkMBB);
4544 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
4545 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
4548 // %FalseValue = ...
4549 // # fallthrough to sinkMBB
4552 // Update machine-CFG edges
4553 BB->addSuccessor(sinkMBB);
4556 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
4559 BuildMI(*BB, BB->begin(), dl,
4560 TII->get(ARM::PHI), MI->getOperand(0).getReg())
4561 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
4562 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
4564 MI->eraseFromParent(); // The pseudo instruction is gone now.
4569 case ARM::BCCZi64: {
4570 // If there is an unconditional branch to the other successor, remove it.
4571 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
4573 // Compare both parts that make up the double comparison separately for equality.
4575 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
4577 unsigned LHS1 = MI->getOperand(1).getReg();
4578 unsigned LHS2 = MI->getOperand(2).getReg();
4580 AddDefaultPred(BuildMI(BB, dl,
4581 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
4582 .addReg(LHS1).addImm(0));
4583 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
4584 .addReg(LHS2).addImm(0)
4585 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
4587 unsigned RHS1 = MI->getOperand(3).getReg();
4588 unsigned RHS2 = MI->getOperand(4).getReg();
4589 AddDefaultPred(BuildMI(BB, dl,
4590 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
4591 .addReg(LHS1).addReg(RHS1));
4592 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
4593 .addReg(LHS2).addReg(RHS2)
4594 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
4597 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB();
4598 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
4599 if (MI->getOperand(0).getImm() == ARMCC::NE)
4600 std::swap(destMBB, exitMBB);
4602 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4603 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
4604 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B))
4607 MI->eraseFromParent(); // The pseudo instruction is gone now.
4613 //===----------------------------------------------------------------------===//
4614 // ARM Optimization Hooks
4615 //===----------------------------------------------------------------------===//
4618 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
4619 TargetLowering::DAGCombinerInfo &DCI) {
4620 SelectionDAG &DAG = DCI.DAG;
4621 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4622 EVT VT = N->getValueType(0);
4623 unsigned Opc = N->getOpcode();
4624 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
4625 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
4626 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
4627 ISD::CondCode CC = ISD::SETCC_INVALID;
4630 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
4632 SDValue CCOp = Slct.getOperand(0);
4633 if (CCOp.getOpcode() == ISD::SETCC)
4634 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
4637 bool DoXform = false;
4639 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
4642 if (LHS.getOpcode() == ISD::Constant &&
4643 cast<ConstantSDNode>(LHS)->isNullValue()) {
4645 } else if (CC != ISD::SETCC_INVALID &&
4646 RHS.getOpcode() == ISD::Constant &&
4647 cast<ConstantSDNode>(RHS)->isNullValue()) {
4648 std::swap(LHS, RHS);
4649 SDValue Op0 = Slct.getOperand(0);
4650 EVT OpVT = isSlctCC ? Op0.getValueType() :
4651 Op0.getOperand(0).getValueType();
4652 bool isInt = OpVT.isInteger();
4653 CC = ISD::getSetCCInverse(CC, isInt);
4655 if (!TLI.isCondCodeLegal(CC, OpVT))
4656 return SDValue(); // Inverse operator isn't legal.
4663 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
4665 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
4666 Slct.getOperand(0), Slct.getOperand(1), CC);
4667 SDValue CCOp = Slct.getOperand(0);
4669 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
4670 CCOp.getOperand(0), CCOp.getOperand(1), CC);
4671 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
4672 CCOp, OtherOp, Result);
4677 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
4678 /// operands N0 and N1. This is a helper for PerformADDCombine that is
4679 /// called with the default operands, and if that fails, with commuted operands.
4681 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
4682 TargetLowering::DAGCombinerInfo &DCI) {
4683 // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
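// e.g. (add (select cc, 0, 4), x) -> (select cc, x, (add x, 4))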
4684 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
4685 SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
4686 if (Result.getNode()) return Result;
4691 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
4693 static SDValue PerformADDCombine(SDNode *N,
4694 TargetLowering::DAGCombinerInfo &DCI) {
4695 SDValue N0 = N->getOperand(0);
4696 SDValue N1 = N->getOperand(1);
4698 // First try with the default operand order.
4699 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI);
4700 if (Result.getNode())
4703 // If that didn't work, try again with the operands commuted.
4704 return PerformADDCombineWithOperands(N, N1, N0, DCI);
4707 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
4709 static SDValue PerformSUBCombine(SDNode *N,
4710 TargetLowering::DAGCombinerInfo &DCI) {
4711 SDValue N0 = N->getOperand(0);
4712 SDValue N1 = N->getOperand(1);
4714 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
4715 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
4716 SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
4717 if (Result.getNode()) return Result;
4723 static SDValue PerformMULCombine(SDNode *N,
4724 TargetLowering::DAGCombinerInfo &DCI,
4725 const ARMSubtarget *Subtarget) {
4726 SelectionDAG &DAG = DCI.DAG;
4728 if (Subtarget->isThumb1Only())
4731 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
4734 EVT VT = N->getValueType(0);
4738 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
4742 uint64_t MulAmt = C->getZExtValue();
4743 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
4744 ShiftAmt = ShiftAmt & (32 - 1);
4745 SDValue V = N->getOperand(0);
4746 DebugLoc DL = N->getDebugLoc();
4749 MulAmt >>= ShiftAmt;
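// For example, for (mul x, 36): ShiftAmt == 2 strips the trailing zeros,
// MulAmt becomes 9 == 2^3 + 1, and the result is ((x << 3) + x) << 2.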
4750 if (isPowerOf2_32(MulAmt - 1)) {
4751 // (mul x, 2^N + 1) => (add (shl x, N), x)
4752 Res = DAG.getNode(ISD::ADD, DL, VT,
4753 V, DAG.getNode(ISD::SHL, DL, VT,
4754 V, DAG.getConstant(Log2_32(MulAmt-1),
4756 } else if (isPowerOf2_32(MulAmt + 1)) {
4757 // (mul x, 2^N - 1) => (sub (shl x, N), x)
4758 Res = DAG.getNode(ISD::SUB, DL, VT,
4759 DAG.getNode(ISD::SHL, DL, VT,
4760 V, DAG.getConstant(Log2_32(MulAmt+1),
4767 Res = DAG.getNode(ISD::SHL, DL, VT, Res,
4768 DAG.getConstant(ShiftAmt, MVT::i32));
4770 // Do not add new nodes to DAG combiner worklist.
4771 DCI.CombineTo(N, Res, false);
4775 static SDValue PerformANDCombine(SDNode *N,
4776 TargetLowering::DAGCombinerInfo &DCI) {
4777 // Attempt to use immediate-form VBIC
4778 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
4779 DebugLoc dl = N->getDebugLoc();
4780 EVT VT = N->getValueType(0);
4781 SelectionDAG &DAG = DCI.DAG;
4783 APInt SplatBits, SplatUndef;
4784 unsigned SplatBitSize;
4787 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4788 if (SplatBitSize <= 64) {
4790 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
4791 SplatUndef.getZExtValue(), SplatBitSize,
4792 DAG, VbicVT, VT.is128BitVector(),
4794 if (Val.getNode()) {
4796 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
4797 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
4798 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
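// Illustrative example of the immediate-form VBIC above: an AND of a v4i32
// value with a build_vector splat of 0xffffff00 has ~SplatBits == 0xff,
// which is encodable as a NEON modified immediate, so the AND becomes
// ARMISD::VBICIMM and can be selected as a single "vbic.i32 #0xff" instead
// of materializing the mask in a register.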
4806 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
4807 static SDValue PerformORCombine(SDNode *N,
4808 TargetLowering::DAGCombinerInfo &DCI,
4809 const ARMSubtarget *Subtarget) {
4810 // Attempt to use immediate-form VORR
4811 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
4812 DebugLoc dl = N->getDebugLoc();
4813 EVT VT = N->getValueType(0);
4814 SelectionDAG &DAG = DCI.DAG;
4816 APInt SplatBits, SplatUndef;
4817 unsigned SplatBitSize;
4819 if (BVN && Subtarget->hasNEON() &&
4820 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4821 if (SplatBitSize <= 64) {
4823 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
4824 SplatUndef.getZExtValue(), SplatBitSize,
4825 DAG, VorrVT, VT.is128BitVector(),
4827 if (Val.getNode()) {
4829 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
4830 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
4831 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
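// Illustrative example, the dual of the VBIC case: an OR with a splat of
// 0x000000ff is encodable directly, so it becomes ARMISD::VORRIMM and can be
// selected as "vorr.i32 #0xff", setting the low byte of every lane without
// loading the constant.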
4836 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when reasonable.
4839 // BFI is only available on V6T2+
4840 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
4843 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
4844 DebugLoc DL = N->getDebugLoc();
4845 // 1) or (and A, mask), val => ARMbfi A, val, mask
4846 // iff (val & mask) == val
4848 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
4849 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
4850 // && CountPopulation_32(mask) == CountPopulation_32(~mask2)
4851 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
4852 // && CountPopulation_32(mask) == CountPopulation_32(~mask2)
4853 // (i.e., copy a bitfield value into another bitfield of the same width)
4854 if (N0.getOpcode() != ISD::AND)
4860 SDValue N00 = N0.getOperand(0);
4862 // The value and the mask need to be constants so we can verify this is
4863 // actually a bitfield set. If the mask is 0xffff, we can do better
4864 // via a movt instruction, so don't use BFI in that case.
4865 SDValue MaskOp = N0.getOperand(1);
4866 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
4869 unsigned Mask = MaskC->getZExtValue();
4873 // Case (1): or (and A, mask), val => ARMbfi A, val, mask
4874 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4876 unsigned Val = N1C->getZExtValue();
4877 if ((Val & ~Mask) != Val)
4880 if (ARM::isBitFieldInvertedMask(Mask)) {
4881 Val >>= CountTrailingZeros_32(~Mask);
4883 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
4884 DAG.getConstant(Val, MVT::i32),
4885 DAG.getConstant(Mask, MVT::i32));
4887 // Do not add new nodes to DAG combiner worklist.
4888 DCI.CombineTo(N, Res, false);
4891 } else if (N1.getOpcode() == ISD::AND) {
4892 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
4893 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
4896 unsigned Mask2 = N11C->getZExtValue();
4898 if (ARM::isBitFieldInvertedMask(Mask) &&
4899 ARM::isBitFieldInvertedMask(~Mask2) &&
4900 (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) {
4901 // The pack halfword instruction works better for masks that fit it,
4902 // so use that when it's available.
4903 if (Subtarget->hasT2ExtractPack() &&
4904 (Mask == 0xffff || Mask == 0xffff0000))
4907 unsigned lsb = CountTrailingZeros_32(Mask2);
4908 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
4909 DAG.getConstant(lsb, MVT::i32));
4910 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
4911 DAG.getConstant(Mask, MVT::i32));
4912 // Do not add new nodes to DAG combiner worklist.
4913 DCI.CombineTo(N, Res, false);
4915 } else if (ARM::isBitFieldInvertedMask(~Mask) &&
4916 ARM::isBitFieldInvertedMask(Mask2) &&
4917 (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) {
4918 // The pack halfword instruction works better for masks that fit it,
4919 // so use that when it's available.
4920 if (Subtarget->hasT2ExtractPack() &&
4921 (Mask2 == 0xffff || Mask2 == 0xffff0000))
4924 unsigned lsb = CountTrailingZeros_32(Mask);
4925 Res = DAG.getNode(ISD::SRL, DL, VT, N00,
4926 DAG.getConstant(lsb, MVT::i32));
4927 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
4928 DAG.getConstant(Mask2, MVT::i32));
4929 // Do not add new nodes to DAG combiner worklist.
4930 DCI.CombineTo(N, Res, false);
4935 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
4936 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
4937 ARM::isBitFieldInvertedMask(~Mask)) {
4938 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
4939 // where lsb(mask) == #shamt and masked bits of B are known zero.
4940 SDValue ShAmt = N00.getOperand(1);
4941 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
4942 unsigned LSB = CountTrailingZeros_32(Mask);
4946 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
4947 DAG.getConstant(~Mask, MVT::i32));
4949 // Do not add new nodes to DAG combiner worklist.
4950 DCI.CombineTo(N, Res, false);
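// Worked examples of the three cases above (values chosen for exposition):
//  (1) or (and A, 0xffff00ff), 0x2300
//        => ARMbfi A, 0x23, 0xffff00ff   (insert 0x23 into bits [15:8] of A)
//  (2) or (and A, 0xffff00ff), (and B, 0x0000ff00)
//        => ARMbfi A, (lsr B, 8), 0xffff00ff
//      i.e. copy the 8-bit field at [15:8] of B into the same field of A.
//  (3) or (and (shl A, 8), 0x0000ff00), B   with bits [15:8] of B known zero
//        => ARMbfi B, A, 0xffff00ff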
4956 /// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff the bits being cleared by the AND are not demanded by the BFI.
4958 static SDValue PerformBFICombine(SDNode *N,
4959 TargetLowering::DAGCombinerInfo &DCI) {
4960 SDValue N1 = N->getOperand(1);
4961 if (N1.getOpcode() == ISD::AND) {
4962 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
4965 unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
4966 unsigned Mask2 = N11C->getZExtValue();
4967 if ((Mask & Mask2) == Mask2)
4968 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0),
4969 N->getOperand(0), N1.getOperand(0),
4975 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
4976 /// ARMISD::VMOVRRD.
4977 static SDValue PerformVMOVRRDCombine(SDNode *N,
4978 TargetLowering::DAGCombinerInfo &DCI) {
4979 // vmovrrd(vmovdrr x, y) -> x,y
4980 SDValue InDouble = N->getOperand(0);
4981 if (InDouble.getOpcode() == ARMISD::VMOVDRR)
4982 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
4986 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
4987 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
4988 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
4989 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
4990 SDValue Op0 = N->getOperand(0);
4991 SDValue Op1 = N->getOperand(1);
4992 if (Op0.getOpcode() == ISD::BITCAST)
4993 Op0 = Op0.getOperand(0);
4994 if (Op1.getOpcode() == ISD::BITCAST)
4995 Op1 = Op1.getOperand(0);
4996 if (Op0.getOpcode() == ARMISD::VMOVRRD &&
4997 Op0.getNode() == Op1.getNode() &&
4998 Op0.getResNo() == 0 && Op1.getResNo() == 1)
4999 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
5000 N->getValueType(0), Op0.getOperand(0));
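// For example (illustrative): if an f64 value X is split into two i32 halves
// by N = (vmovrrd X) only to be reassembled by (vmovdrr N:0, N:1), the pair
// cancels and is replaced by a bitcast of X, keeping the value in a VFP/NEON
// D register instead of bouncing it through a GPR pair.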
5004 /// PerformSTORECombine - Target-specific dag combine xforms for
5006 static SDValue PerformSTORECombine(SDNode *N,
5007 TargetLowering::DAGCombinerInfo &DCI) {
5008 // Bitcast an i64 store extracted from a vector to f64.
5009 // Otherwise, the i64 value will be legalized to a pair of i32 values.
5010 StoreSDNode *St = cast<StoreSDNode>(N);
5011 SDValue StVal = St->getValue();
5012 if (!ISD::isNormalStore(St) || St->isVolatile() ||
5013 StVal.getValueType() != MVT::i64 ||
5014 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
5017 SelectionDAG &DAG = DCI.DAG;
5018 DebugLoc dl = StVal.getDebugLoc();
5019 SDValue IntVec = StVal.getOperand(0);
5020 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
5021 IntVec.getValueType().getVectorNumElements());
5022 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
5023 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
5024 Vec, StVal.getOperand(1));
5025 dl = N->getDebugLoc();
5026 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
5027 // Make the DAGCombiner fold the bitcasts.
5028 DCI.AddToWorklist(Vec.getNode());
5029 DCI.AddToWorklist(ExtElt.getNode());
5030 DCI.AddToWorklist(V.getNode());
5031 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
5032 St->getPointerInfo(), St->isVolatile(),
5033 St->isNonTemporal(), St->getAlignment(),
5037 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
5038 /// are normal, non-volatile loads. If so, it is profitable to bitcast an
5039 /// i64 vector to have f64 elements, since the value can then be loaded
5040 /// directly into a VFP register.
5041 static bool hasNormalLoadOperand(SDNode *N) {
5042 unsigned NumElts = N->getValueType(0).getVectorNumElements();
5043 for (unsigned i = 0; i < NumElts; ++i) {
5044 SDNode *Elt = N->getOperand(i).getNode();
5045 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
5051 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
5052 /// ISD::BUILD_VECTOR.
5053 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
5054 TargetLowering::DAGCombinerInfo &DCI){
5055 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
5056 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
5057 // into a pair of GPRs, which is fine when the value is used as a scalar,
5058 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
5059 SelectionDAG &DAG = DCI.DAG;
5060 if (N->getNumOperands() == 2) {
5061 SDValue RV = PerformVMOVDRRCombine(N, DAG);
5066 // Load i64 elements as f64 values so that type legalization does not split
5067 // them up into i32 values.
5068 EVT VT = N->getValueType(0);
5069 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
5071 DebugLoc dl = N->getDebugLoc();
5072 SmallVector<SDValue, 8> Ops;
5073 unsigned NumElts = VT.getVectorNumElements();
5074 for (unsigned i = 0; i < NumElts; ++i) {
5075 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
5077 // Make the DAGCombiner fold the bitcast.
5078 DCI.AddToWorklist(V.getNode());
5080 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
5081 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts);
5082 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
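// Concrete illustration: a v2i64 build_vector whose operands are normal i64
// loads would otherwise have each element legalized into a pair of i32
// values. Building a v2f64 from the same loads (the bitcasts fold into the
// loads) and bitcasting the result back to v2i64 keeps each element in a
// D register, e.g. loaded with a single VLDR, with no GPR pair involved.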
5085 /// PerformInsertEltCombine - Target-specific dag combine xforms for
5086 /// ISD::INSERT_VECTOR_ELT.
5087 static SDValue PerformInsertEltCombine(SDNode *N,
5088 TargetLowering::DAGCombinerInfo &DCI) {
5089 // Bitcast an i64 load inserted into a vector to f64.
5090 // Otherwise, the i64 value will be legalized to a pair of i32 values.
5091 EVT VT = N->getValueType(0);
5092 SDNode *Elt = N->getOperand(1).getNode();
5093 if (VT.getVectorElementType() != MVT::i64 ||
5094 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
5097 SelectionDAG &DAG = DCI.DAG;
5098 DebugLoc dl = N->getDebugLoc();
5099 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
5100 VT.getVectorNumElements());
5101 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
5102 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
5103 // Make the DAGCombiner fold the bitcasts.
5104 DCI.AddToWorklist(Vec.getNode());
5105 DCI.AddToWorklist(V.getNode());
5106 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
5107 Vec, V, N->getOperand(2));
5108 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
5111 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
5112 /// ISD::VECTOR_SHUFFLE.
5113 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
5114 // The LLVM shufflevector instruction does not require the shuffle mask
5115 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
5116 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the
5117 // operands do not match the mask length, they are extended by concatenating
5118 // them with undef vectors. That is probably the right thing for other
5119 // targets, but for NEON it is better to concatenate two double-register
5120 // size vector operands into a single quad-register size vector. Do that
5121 // transformation here:
5122 // shuffle(concat(v1, undef), concat(v2, undef)) ->
5123 // shuffle(concat(v1, v2), undef)
5124 SDValue Op0 = N->getOperand(0);
5125 SDValue Op1 = N->getOperand(1);
5126 if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
5127 Op1.getOpcode() != ISD::CONCAT_VECTORS ||
5128 Op0.getNumOperands() != 2 ||
5129 Op1.getNumOperands() != 2)
5131 SDValue Concat0Op1 = Op0.getOperand(1);
5132 SDValue Concat1Op1 = Op1.getOperand(1);
5133 if (Concat0Op1.getOpcode() != ISD::UNDEF ||
5134 Concat1Op1.getOpcode() != ISD::UNDEF)
5136 // Skip the transformation if any of the types are illegal.
5137 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5138 EVT VT = N->getValueType(0);
5139 if (!TLI.isTypeLegal(VT) ||
5140 !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
5141 !TLI.isTypeLegal(Concat1Op1.getValueType()))
5144 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
5145 Op0.getOperand(0), Op1.getOperand(0));
5146 // Translate the shuffle mask.
5147 SmallVector<int, 16> NewMask;
5148 unsigned NumElts = VT.getVectorNumElements();
5149 unsigned HalfElts = NumElts/2;
5150 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
5151 for (unsigned n = 0; n < NumElts; ++n) {
5152 int MaskElt = SVN->getMaskElt(n);
5154 if (MaskElt < (int)HalfElts)
5156 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
5157 NewElt = HalfElts + MaskElt - NumElts;
5158 NewMask.push_back(NewElt);
5160 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
5161 DAG.getUNDEF(VT), NewMask.data());
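// Worked example (illustrative): a v4i32 shuffle of
//   concat(v1:v2i32, undef) and concat(v2:v2i32, undef) with mask <0, 4, 1, 5>
// becomes a shuffle of concat(v1, v2) and undef with mask <0, 2, 1, 3>;
// indices that referred to the second operand (>= NumElts) are remapped into
// the upper half of the combined quad register (HalfElts + MaskElt - NumElts).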
5164 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
5165 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
5166 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
5168 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
5169 SelectionDAG &DAG = DCI.DAG;
5170 EVT VT = N->getValueType(0);
5171 // vldN-dup instructions only support 64-bit vectors for N > 1.
5172 if (!VT.is64BitVector())
5175 // Check if the VDUPLANE operand is a vldN-dup intrinsic.
5176 SDNode *VLD = N->getOperand(0).getNode();
5177 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
5179 unsigned NumVecs = 0;
5180 unsigned NewOpc = 0;
5181 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
5182 if (IntNo == Intrinsic::arm_neon_vld2lane) {
5184 NewOpc = ARMISD::VLD2DUP;
5185 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
5187 NewOpc = ARMISD::VLD3DUP;
5188 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
5190 NewOpc = ARMISD::VLD4DUP;
5195 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
5196 // numbers match the load.
5197 unsigned VLDLaneNo =
5198 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
5199 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
5201 // Ignore uses of the chain result.
5202 if (UI.getUse().getResNo() == NumVecs)
5205 if (User->getOpcode() != ARMISD::VDUPLANE ||
5206 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
5210 // Create the vldN-dup node.
5213 for (n = 0; n < NumVecs; ++n)
5215 Tys[n] = MVT::Other;
5216 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1);
5217 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
5218 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
5219 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys,
5220 Ops, 2, VLDMemInt->getMemoryVT(),
5221 VLDMemInt->getMemOperand());
5224 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
5226 unsigned ResNo = UI.getUse().getResNo();
5227 // Ignore uses of the chain result.
5228 if (ResNo == NumVecs)
5231 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
5234 // Now the vldN-lane intrinsic is dead except for its chain result.
5235 // Update uses of the chain.
5236 std::vector<SDValue> VLDDupResults;
5237 for (unsigned n = 0; n < NumVecs; ++n)
5238 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
5239 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
5240 DCI.CombineTo(VLD, VLDDupResults);
5245 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
5246 /// ARMISD::VDUPLANE.
5247 static SDValue PerformVDUPLANECombine(SDNode *N,
5248 TargetLowering::DAGCombinerInfo &DCI) {
5249 SDValue Op = N->getOperand(0);
5251 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
5252 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
5253 if (CombineVLDDUP(N, DCI))
5254 return SDValue(N, 0);
5256 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
5257 // redundant. Ignore bit_converts for now; element sizes are checked below.
5258 while (Op.getOpcode() == ISD::BITCAST)
5259 Op = Op.getOperand(0);
5260 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
5263 // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
5264 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
5265 // The canonical VMOV for a zero vector uses a 32-bit element size.
5266 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5268 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
5270 EVT VT = N->getValueType(0);
5271 if (EltSize > VT.getVectorElementType().getSizeInBits())
5274 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
5277 /// getVShiftImm - Check if this is a valid build_vector for the immediate
5278 /// operand of a vector shift operation, where all the elements of the
5279 /// build_vector must have the same constant integer value.
5280 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
5281 // Ignore bit_converts.
5282 while (Op.getOpcode() == ISD::BITCAST)
5283 Op = Op.getOperand(0);
5284 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
5285 APInt SplatBits, SplatUndef;
5286 unsigned SplatBitSize;
5288 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
5289 HasAnyUndefs, ElementBits) ||
5290 SplatBitSize > ElementBits)
5292 Cnt = SplatBits.getSExtValue();
5296 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
5297 /// operand of a vector shift left operation. That value must be in the range:
5298 /// 0 <= Value < ElementBits for a left shift; or
5299 /// 0 <= Value <= ElementBits for a long left shift.
5300 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
5301 assert(VT.isVector() && "vector shift count is not a vector type");
5302 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
5303 if (! getVShiftImm(Op, ElementBits, Cnt))
5305 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
5308 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
5309 /// operand of a vector shift right operation. For a shift opcode, the value
5310 /// is positive, but for an intrinsic the count must be negative. The
5311 /// absolute value must be in the range:
5312 /// 1 <= |Value| <= ElementBits for a right shift; or
5313 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
5314 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
5316 assert(VT.isVector() && "vector shift count is not a vector type");
5317 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
5318 if (! getVShiftImm(Op, ElementBits, Cnt))
5322 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
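// Examples of the ranges above for 16-bit elements (illustrative):
// a left-shift immediate must satisfy 0 <= Cnt < 16 (or Cnt <= 16 for the
// long VSHLL form), and a right-shift immediate must satisfy 1 <= Cnt <= 16
// (or Cnt <= 8 for the narrowing forms). So "vshr.s16 d0, d1, #16" is
// encodable while "vshl.i16 d0, d1, #16" is not.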
5325 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
5326 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
5327 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5330 // Don't do anything for most intrinsics.
5333 // Vector shifts: check for immediate versions and lower them.
5334 // Note: This is done during DAG combining instead of DAG legalizing because
5335 // the build_vectors for 64-bit vector element shift counts are generally
5336 // not legal, and it is hard to see their values after they get legalized to
5337 // loads from a constant pool.
5338 case Intrinsic::arm_neon_vshifts:
5339 case Intrinsic::arm_neon_vshiftu:
5340 case Intrinsic::arm_neon_vshiftls:
5341 case Intrinsic::arm_neon_vshiftlu:
5342 case Intrinsic::arm_neon_vshiftn:
5343 case Intrinsic::arm_neon_vrshifts:
5344 case Intrinsic::arm_neon_vrshiftu:
5345 case Intrinsic::arm_neon_vrshiftn:
5346 case Intrinsic::arm_neon_vqshifts:
5347 case Intrinsic::arm_neon_vqshiftu:
5348 case Intrinsic::arm_neon_vqshiftsu:
5349 case Intrinsic::arm_neon_vqshiftns:
5350 case Intrinsic::arm_neon_vqshiftnu:
5351 case Intrinsic::arm_neon_vqshiftnsu:
5352 case Intrinsic::arm_neon_vqrshiftns:
5353 case Intrinsic::arm_neon_vqrshiftnu:
5354 case Intrinsic::arm_neon_vqrshiftnsu: {
5355 EVT VT = N->getOperand(1).getValueType();
5357 unsigned VShiftOpc = 0;
5360 case Intrinsic::arm_neon_vshifts:
5361 case Intrinsic::arm_neon_vshiftu:
5362 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
5363 VShiftOpc = ARMISD::VSHL;
5366 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
5367 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
5368 ARMISD::VSHRs : ARMISD::VSHRu);
5373 case Intrinsic::arm_neon_vshiftls:
5374 case Intrinsic::arm_neon_vshiftlu:
5375 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
5377 llvm_unreachable("invalid shift count for vshll intrinsic");
5379 case Intrinsic::arm_neon_vrshifts:
5380 case Intrinsic::arm_neon_vrshiftu:
5381 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
5385 case Intrinsic::arm_neon_vqshifts:
5386 case Intrinsic::arm_neon_vqshiftu:
5387 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
5391 case Intrinsic::arm_neon_vqshiftsu:
5392 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
5394 llvm_unreachable("invalid shift count for vqshlu intrinsic");
5396 case Intrinsic::arm_neon_vshiftn:
5397 case Intrinsic::arm_neon_vrshiftn:
5398 case Intrinsic::arm_neon_vqshiftns:
5399 case Intrinsic::arm_neon_vqshiftnu:
5400 case Intrinsic::arm_neon_vqshiftnsu:
5401 case Intrinsic::arm_neon_vqrshiftns:
5402 case Intrinsic::arm_neon_vqrshiftnu:
5403 case Intrinsic::arm_neon_vqrshiftnsu:
5404 // Narrowing shifts require an immediate right shift.
5405 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
5407 llvm_unreachable("invalid shift count for narrowing vector shift "
5411 llvm_unreachable("unhandled vector shift");
5415 case Intrinsic::arm_neon_vshifts:
5416 case Intrinsic::arm_neon_vshiftu:
5417 // Opcode already set above.
5419 case Intrinsic::arm_neon_vshiftls:
5420 case Intrinsic::arm_neon_vshiftlu:
5421 if (Cnt == VT.getVectorElementType().getSizeInBits())
5422 VShiftOpc = ARMISD::VSHLLi;
5424 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
5425 ARMISD::VSHLLs : ARMISD::VSHLLu);
5427 case Intrinsic::arm_neon_vshiftn:
5428 VShiftOpc = ARMISD::VSHRN; break;
5429 case Intrinsic::arm_neon_vrshifts:
5430 VShiftOpc = ARMISD::VRSHRs; break;
5431 case Intrinsic::arm_neon_vrshiftu:
5432 VShiftOpc = ARMISD::VRSHRu; break;
5433 case Intrinsic::arm_neon_vrshiftn:
5434 VShiftOpc = ARMISD::VRSHRN; break;
5435 case Intrinsic::arm_neon_vqshifts:
5436 VShiftOpc = ARMISD::VQSHLs; break;
5437 case Intrinsic::arm_neon_vqshiftu:
5438 VShiftOpc = ARMISD::VQSHLu; break;
5439 case Intrinsic::arm_neon_vqshiftsu:
5440 VShiftOpc = ARMISD::VQSHLsu; break;
5441 case Intrinsic::arm_neon_vqshiftns:
5442 VShiftOpc = ARMISD::VQSHRNs; break;
5443 case Intrinsic::arm_neon_vqshiftnu:
5444 VShiftOpc = ARMISD::VQSHRNu; break;
5445 case Intrinsic::arm_neon_vqshiftnsu:
5446 VShiftOpc = ARMISD::VQSHRNsu; break;
5447 case Intrinsic::arm_neon_vqrshiftns:
5448 VShiftOpc = ARMISD::VQRSHRNs; break;
5449 case Intrinsic::arm_neon_vqrshiftnu:
5450 VShiftOpc = ARMISD::VQRSHRNu; break;
5451 case Intrinsic::arm_neon_vqrshiftnsu:
5452 VShiftOpc = ARMISD::VQRSHRNsu; break;
5455 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
5456 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
5459 case Intrinsic::arm_neon_vshiftins: {
5460 EVT VT = N->getOperand(1).getValueType();
5462 unsigned VShiftOpc = 0;
5464 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
5465 VShiftOpc = ARMISD::VSLI;
5466 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
5467 VShiftOpc = ARMISD::VSRI;
5469 llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
5472 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
5473 N->getOperand(1), N->getOperand(2),
5474 DAG.getConstant(Cnt, MVT::i32));
5477 case Intrinsic::arm_neon_vqrshifts:
5478 case Intrinsic::arm_neon_vqrshiftu:
5479 // No immediate versions of these to check for.
5486 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
5487 /// lowers them. As with the vector shift intrinsics, this is done during DAG
5488 /// combining instead of DAG legalizing because the build_vectors for 64-bit
5489 /// vector element shift counts are generally not legal, and it is hard to see
5490 /// their values after they get legalized to loads from a constant pool.
5491 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
5492 const ARMSubtarget *ST) {
5493 EVT VT = N->getValueType(0);
5495 // Nothing to be done for scalar shifts.
5496 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5497 if (!VT.isVector() || !TLI.isTypeLegal(VT))
5500 assert(ST->hasNEON() && "unexpected vector shift");
5503 switch (N->getOpcode()) {
5504 default: llvm_unreachable("unexpected shift opcode");
5507 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
5508 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
5509 DAG.getConstant(Cnt, MVT::i32));
5514 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
5515 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
5516 ARMISD::VSHRs : ARMISD::VSHRu);
5517 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
5518 DAG.getConstant(Cnt, MVT::i32));
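// For instance (illustrative): a generic (srl v4i32:x, build_vector<3,3,3,3>)
// reaching this combine becomes (ARMISD::VSHRu x, 3), which can then be
// selected as a single "vshr.u32 q0, q1, #3" rather than a shift by a vector
// register.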
5524 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
5525 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
5526 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
5527 const ARMSubtarget *ST) {
5528 SDValue N0 = N->getOperand(0);
5530 // Check for sign- and zero-extensions of vector extract operations of 8-
5531 // and 16-bit vector elements. NEON supports these directly. They are
5532 // handled during DAG combining because type legalization will promote them
5533 // to 32-bit types and it is messy to recognize the operations after that.
5534 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5535 SDValue Vec = N0.getOperand(0);
5536 SDValue Lane = N0.getOperand(1);
5537 EVT VT = N->getValueType(0);
5538 EVT EltVT = N0.getValueType();
5539 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5541 if (VT == MVT::i32 &&
5542 (EltVT == MVT::i8 || EltVT == MVT::i16) &&
5543 TLI.isTypeLegal(Vec.getValueType()) &&
5544 isa<ConstantSDNode>(Lane)) {
5547 switch (N->getOpcode()) {
5548 default: llvm_unreachable("unexpected opcode");
5549 case ISD::SIGN_EXTEND:
5550 Opc = ARMISD::VGETLANEs;
5552 case ISD::ZERO_EXTEND:
5553 case ISD::ANY_EXTEND:
5554 Opc = ARMISD::VGETLANEu;
5557 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
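// Example (illustrative): (sext (extract_vector_elt v8i16:v, 3)) producing an
// i32 becomes (ARMISD::VGETLANEs v, 3), which maps onto the sign-extending
// lane move "vmov.s16 r0, d0[3]" and avoids a separate extend after a plain
// lane extraction.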
5564 /// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
5565 /// to match f32 max/min patterns to use NEON vmax/vmin instructions.
5566 static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
5567 const ARMSubtarget *ST) {
5568 // If the target supports NEON, try to use vmax/vmin instructions for f32
5569 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set,
5570 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is
5571 // a NaN; only do the transformation when it matches that behavior.
5573 // For now only do this when using NEON for FP operations; if using VFP, it
5574 // is not obvious that the benefit outweighs the cost of switching to the
5576 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
5577 N->getValueType(0) != MVT::f32)
5580 SDValue CondLHS = N->getOperand(0);
5581 SDValue CondRHS = N->getOperand(1);
5582 SDValue LHS = N->getOperand(2);
5583 SDValue RHS = N->getOperand(3);
5584 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
5586 unsigned Opcode = 0;
5588 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
5589 IsReversed = false; // x CC y ? x : y
5590 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
5591 IsReversed = true; // x CC y ? y : x
5605 // If LHS is NaN, an ordered comparison will be false and the result will
5606 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS
5607 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN.
5608 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
5609 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
5611 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
5612 // will return -0, so vmin can only be used for unsafe math or if one of
5613 // the operands is known to be nonzero.
5614 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
5616 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
5618 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
5627 // If LHS is NaN, an ordered comparison will be false and the result will
5628 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS
5629 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN.
5630 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
5631 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
5633 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
5634 // will return +0, so vmax can only be used for unsafe math or if one of
5635 // the operands is known to be nonzero.
5636 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
5638 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
5640 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
5646 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
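// Example (illustrative): for f32 operands, "a < b ? a : b" is the
// x CC y ? x : y shape with an ordered less-than, so when a is known not to
// be NaN it becomes ARMISD::FMIN and can be selected as NEON "vmin.f32". The
// NaN check matters because vmin/vmax return NaN whenever either input is
// NaN, while the original select would have produced b when a is NaN.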
5649 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
5650 DAGCombinerInfo &DCI) const {
5651 switch (N->getOpcode()) {
5653 case ISD::ADD: return PerformADDCombine(N, DCI);
5654 case ISD::SUB: return PerformSUBCombine(N, DCI);
5655 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
5656 case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
5657 case ISD::AND: return PerformANDCombine(N, DCI);
5658 case ARMISD::BFI: return PerformBFICombine(N, DCI);
5659 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
5660 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
5661 case ISD::STORE: return PerformSTORECombine(N, DCI);
5662 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI);
5663 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
5664 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
5665 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
5666 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
5669 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget);
5670 case ISD::SIGN_EXTEND:
5671 case ISD::ZERO_EXTEND:
5672 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
5673 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
5678 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
5679 if (!Subtarget->allowsUnalignedMem())
5682 switch (VT.getSimpleVT().SimpleTy) {
5689 // FIXME: VLD1 etc with standard alignment is legal.
5693 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
5698 switch (VT.getSimpleVT().SimpleTy) {
5699 default: return false;
5714 if ((V & (Scale - 1)) != 0)
5717 return V == (V & ((1LL << 5) - 1));
5720 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
5721 const ARMSubtarget *Subtarget) {
5728 switch (VT.getSimpleVT().SimpleTy) {
5729 default: return false;
5734 // + imm12 or - imm8
5736 return V == (V & ((1LL << 8) - 1));
5737 return V == (V & ((1LL << 12) - 1));
5740 // Same as ARM mode. FIXME: NEON?
5741 if (!Subtarget->hasVFP2())
5746 return V == (V & ((1LL << 8) - 1));
5750 /// isLegalAddressImmediate - Return true if the integer value can be used
5751 /// as the offset of the target addressing mode for load / store of the
5753 static bool isLegalAddressImmediate(int64_t V, EVT VT,
5754 const ARMSubtarget *Subtarget) {
5761 if (Subtarget->isThumb1Only())
5762 return isLegalT1AddressImmediate(V, VT);
5763 else if (Subtarget->isThumb2())
5764 return isLegalT2AddressImmediate(V, VT, Subtarget);
5769 switch (VT.getSimpleVT().SimpleTy) {
5770 default: return false;
5775 return V == (V & ((1LL << 12) - 1));
5778 return V == (V & ((1LL << 8) - 1));
5781 if (!Subtarget->hasVFP2()) // FIXME: NEON?
5786 return V == (V & ((1LL << 8) - 1));
5790 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
5792 int Scale = AM.Scale;
5796 switch (VT.getSimpleVT().SimpleTy) {
5797 default: return false;
5806 return Scale == 2 || Scale == 4 || Scale == 8;
5809 if (((unsigned)AM.HasBaseReg + Scale) <= 2)
5813 // Note, we allow "void" uses (basically, uses that aren't loads or
5814 // stores), because ARM allows folding a scale into many arithmetic
5815 // operations. This should be made more precise and revisited later.
5817 // Allow r << imm, but the imm has to be a multiple of two.
5818 if (Scale & 1) return false;
5819 return isPowerOf2_32(Scale);
5823 /// isLegalAddressingMode - Return true if the addressing mode represented
5824 /// by AM is legal for this target, for a load/store of the specified type.
5825 bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
5826 const Type *Ty) const {
5827 EVT VT = getValueType(Ty, true);
5828 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
5831 // Can never fold addr of global into load/store.
5836 case 0: // no scale reg, must be "r+i" or "r", or "i".
5839 if (Subtarget->isThumb1Only())
5843 // ARM doesn't support any R+R*scale+imm addr modes.
5850 if (Subtarget->isThumb2())
5851 return isLegalT2ScaledAddressingMode(AM, VT);
5853 int Scale = AM.Scale;
5854 switch (VT.getSimpleVT().SimpleTy) {
5855 default: return false;
5859 if (Scale < 0) Scale = -Scale;
5863 return isPowerOf2_32(Scale & ~1);
5867 if (((unsigned)AM.HasBaseReg + Scale) <= 2)
5872 // Note, we allow "void" uses (basically, uses that aren't loads or
5873 // stores), because ARM allows folding a scale into many arithmetic
5874 // operations. This should be made more precise and revisited later.
5876 // Allow r << imm, but the imm has to be a multiple of two.
5877 if (Scale & 1) return false;
5878 return isPowerOf2_32(Scale);
5885 /// isLegalICmpImmediate - Return true if the specified immediate is legal
5886 /// icmp immediate, that is the target has icmp instructions which can compare
5887 /// a register against the immediate without having to materialize the
5888 /// immediate into a register.
5889 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
5890 if (!Subtarget->isThumb())
5891 return ARM_AM::getSOImmVal(Imm) != -1;
5892 if (Subtarget->isThumb2())
5893 return ARM_AM::getT2SOImmVal(Imm) != -1;
5894 return Imm >= 0 && Imm <= 255;
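// Examples (illustrative): in ARM mode 0xff000000 is a legal compare
// immediate (an 8-bit value rotated right by an even amount), while
// 0x00000101 needs more than 8 significant bits and is not; in Thumb1 mode
// only the plain 0..255 range is accepted.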
5897 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
5898 bool isSEXTLoad, SDValue &Base,
5899 SDValue &Offset, bool &isInc,
5900 SelectionDAG &DAG) {
5901 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
5904 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
5906 Base = Ptr->getOperand(0);
5907 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
5908 int RHSC = (int)RHS->getZExtValue();
5909 if (RHSC < 0 && RHSC > -256) {
5910 assert(Ptr->getOpcode() == ISD::ADD);
5912 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
5916 isInc = (Ptr->getOpcode() == ISD::ADD);
5917 Offset = Ptr->getOperand(1);
5919 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
5921 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
5922 int RHSC = (int)RHS->getZExtValue();
5923 if (RHSC < 0 && RHSC > -0x1000) {
5924 assert(Ptr->getOpcode() == ISD::ADD);
5926 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
5927 Base = Ptr->getOperand(0);
5932 if (Ptr->getOpcode() == ISD::ADD) {
5934 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
5935 if (ShOpcVal != ARM_AM::no_shift) {
5936 Base = Ptr->getOperand(1);
5937 Offset = Ptr->getOperand(0);
5939 Base = Ptr->getOperand(0);
5940 Offset = Ptr->getOperand(1);
5945 isInc = (Ptr->getOpcode() == ISD::ADD);
5946 Base = Ptr->getOperand(0);
5947 Offset = Ptr->getOperand(1);
5951 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
5955 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
5956 bool isSEXTLoad, SDValue &Base,
5957 SDValue &Offset, bool &isInc,
5958 SelectionDAG &DAG) {
5959 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
5962 Base = Ptr->getOperand(0);
5963 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
5964 int RHSC = (int)RHS->getZExtValue();
5965 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
5966 assert(Ptr->getOpcode() == ISD::ADD);
5968 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
5970 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
5971 isInc = Ptr->getOpcode() == ISD::ADD;
5972 Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
5980 /// getPreIndexedAddressParts - returns true by value, base pointer and
5981 /// offset pointer and addressing mode by reference if the node's address
5982 /// can be legally represented as pre-indexed load / store address.
5984 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
5986 ISD::MemIndexedMode &AM,
5987 SelectionDAG &DAG) const {
5988 if (Subtarget->isThumb1Only())
5993 bool isSEXTLoad = false;
5994 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
5995 Ptr = LD->getBasePtr();
5996 VT = LD->getMemoryVT();
5997 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
5998 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
5999 Ptr = ST->getBasePtr();
6000 VT = ST->getMemoryVT();
6005 bool isLegal = false;
6006 if (Subtarget->isThumb2())
6007 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
6008 Offset, isInc, DAG);
6010 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
6011 Offset, isInc, DAG);
6015 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
6019 /// getPostIndexedAddressParts - returns true by value, base pointer and
6020 /// offset pointer and addressing mode by reference if this node can be
6021 /// combined with a load / store to form a post-indexed load / store.
6022 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
6025 ISD::MemIndexedMode &AM,
6026 SelectionDAG &DAG) const {
6027 if (Subtarget->isThumb1Only())
6032 bool isSEXTLoad = false;
6033 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
6034 VT = LD->getMemoryVT();
6035 Ptr = LD->getBasePtr();
6036 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
6037 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
6038 VT = ST->getMemoryVT();
6039 Ptr = ST->getBasePtr();
6044 bool isLegal = false;
6045 if (Subtarget->isThumb2())
6046 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
6049 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
6055 // Swap base ptr and offset to catch more post-index load / store when
6056 // it's legal. In Thumb2 mode, offset must be an immediate.
6057 if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
6058 !Subtarget->isThumb2())
6059 std::swap(Base, Offset);
6061 // Post-indexed load / store update the base pointer.
6066 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
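// Example (illustrative): a load of *p followed by "p += 4" can have the add
// folded into the access as a post-indexed load, i.e. "ldr r0, [r1], #4";
// the pre-indexed form recognized by getPreIndexedAddressParts above
// corresponds to "ldr r0, [r1, #4]!".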
6070 void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
6074 const SelectionDAG &DAG,
6075 unsigned Depth) const {
6076 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
6077 switch (Op.getOpcode()) {
6079 case ARMISD::CMOV: {
6080 // Bits are known zero/one if known on the LHS and RHS.
6081 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
6082 if (KnownZero == 0 && KnownOne == 0) return;
6084 APInt KnownZeroRHS, KnownOneRHS;
6085 DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
6086 KnownZeroRHS, KnownOneRHS, Depth+1);
6087 KnownZero &= KnownZeroRHS;
6088 KnownOne &= KnownOneRHS;
6094 //===----------------------------------------------------------------------===//
6095 // ARM Inline Assembly Support
6096 //===----------------------------------------------------------------------===//
6098 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
6099 // Looking for "rev" which is V6+.
6100 if (!Subtarget->hasV6Ops())
6103 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
6104 std::string AsmStr = IA->getAsmString();
6105 SmallVector<StringRef, 4> AsmPieces;
6106 SplitString(AsmStr, AsmPieces, ";\n");
6108 switch (AsmPieces.size()) {
6109 default: return false;
6111 AsmStr = AsmPieces[0];
6113 SplitString(AsmStr, AsmPieces, " \t,");
6116 if (AsmPieces.size() == 3 &&
6117 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
6118 IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
6119 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
6120 if (Ty && Ty->getBitWidth() == 32)
6121 return IntrinsicLowering::LowerToByteSwap(CI);
6129 /// getConstraintType - Given a constraint letter, return the type of
6130 /// constraint it is for this target.
6131 ARMTargetLowering::ConstraintType
6132 ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
6133 if (Constraint.size() == 1) {
6134 switch (Constraint[0]) {
6136 case 'l': return C_RegisterClass;
6137 case 'w': return C_RegisterClass;
6140 return TargetLowering::getConstraintType(Constraint);
6143 /// Examine constraint type and operand type and determine a weight value.
6144 /// This object must already have been set up with the operand type
6145 /// and the current alternative constraint selected.
6146 TargetLowering::ConstraintWeight
6147 ARMTargetLowering::getSingleConstraintMatchWeight(
6148 AsmOperandInfo &info, const char *constraint) const {
6149 ConstraintWeight weight = CW_Invalid;
6150 Value *CallOperandVal = info.CallOperandVal;
6151 // If we don't have a value, we can't do a match,
6152 // but allow it at the lowest weight.
6153 if (CallOperandVal == NULL)
6155 const Type *type = CallOperandVal->getType();
6156 // Look at the constraint type.
6157 switch (*constraint) {
6159 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
6162 if (type->isIntegerTy()) {
6163 if (Subtarget->isThumb())
6164 weight = CW_SpecificReg;
6166 weight = CW_Register;
6170 if (type->isFloatingPointTy())
6171 weight = CW_Register;
6177 std::pair<unsigned, const TargetRegisterClass*>
6178 ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
6180 if (Constraint.size() == 1) {
6181 // GCC ARM Constraint Letters
6182 switch (Constraint[0]) {
6184 if (Subtarget->isThumb())
6185 return std::make_pair(0U, ARM::tGPRRegisterClass);
6187 return std::make_pair(0U, ARM::GPRRegisterClass);
6189 return std::make_pair(0U, ARM::GPRRegisterClass);
6192 return std::make_pair(0U, ARM::SPRRegisterClass);
6193 if (VT.getSizeInBits() == 64)
6194 return std::make_pair(0U, ARM::DPRRegisterClass);
6195 if (VT.getSizeInBits() == 128)
6196 return std::make_pair(0U, ARM::QPRRegisterClass);
6200 if (StringRef("{cc}").equals_lower(Constraint))
6201 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);
6203 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
6206 std::vector<unsigned> ARMTargetLowering::
6207 getRegClassForInlineAsmConstraint(const std::string &Constraint,
6209 if (Constraint.size() != 1)
6210 return std::vector<unsigned>();
6212 switch (Constraint[0]) { // GCC ARM Constraint Letters
6215 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
6216 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
6219 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
6220 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
6221 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
6222 ARM::R12, ARM::LR, 0);
6225 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
6226 ARM::S4, ARM::S5, ARM::S6, ARM::S7,
6227 ARM::S8, ARM::S9, ARM::S10, ARM::S11,
6228 ARM::S12,ARM::S13,ARM::S14,ARM::S15,
6229 ARM::S16,ARM::S17,ARM::S18,ARM::S19,
6230 ARM::S20,ARM::S21,ARM::S22,ARM::S23,
6231 ARM::S24,ARM::S25,ARM::S26,ARM::S27,
6232 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
6233 if (VT.getSizeInBits() == 64)
6234 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
6235 ARM::D4, ARM::D5, ARM::D6, ARM::D7,
6236 ARM::D8, ARM::D9, ARM::D10,ARM::D11,
6237 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
6238 if (VT.getSizeInBits() == 128)
6239 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
6240 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
6244 return std::vector<unsigned>();
6247 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
6248 /// vector. If it is invalid, don't add anything to Ops.
6249 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
6251 std::vector<SDValue>&Ops,
6252 SelectionDAG &DAG) const {
6253 SDValue Result(0, 0);
6255 switch (Constraint) {
6257 case 'I': case 'J': case 'K': case 'L':
6258 case 'M': case 'N': case 'O':
6259 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
6263 int64_t CVal64 = C->getSExtValue();
6264 int CVal = (int) CVal64;
6265 // None of these constraints allow values larger than 32 bits. Check
6266 // that the value fits in an int.
6270 switch (Constraint) {
6272 if (Subtarget->isThumb1Only()) {
6273 // This must be a constant between 0 and 255, for ADD
6275 if (CVal >= 0 && CVal <= 255)
6277 } else if (Subtarget->isThumb2()) {
6278 // A constant that can be used as an immediate value in a
6279 // data-processing instruction.
6280 if (ARM_AM::getT2SOImmVal(CVal) != -1)
6283 // A constant that can be used as an immediate value in a
6284 // data-processing instruction.
6285 if (ARM_AM::getSOImmVal(CVal) != -1)
6291 if (Subtarget->isThumb()) { // FIXME thumb2
6292 // This must be a constant between -255 and -1, for negated ADD
6293 // immediates. This can be used in GCC with an "n" modifier that
6294 // prints the negated value, for use with SUB instructions. It is
6295 // not useful otherwise but is implemented for compatibility.
6296 if (CVal >= -255 && CVal <= -1)
6299 // This must be a constant between -4095 and 4095. It is not clear
6300 // what this constraint is intended for. Implemented for
6301 // compatibility with GCC.
6302 if (CVal >= -4095 && CVal <= 4095)
6308 if (Subtarget->isThumb1Only()) {
6309 // A 32-bit value where only one byte has a nonzero value. Exclude
6310 // zero to match GCC. This constraint is used by GCC internally for
6311 // constants that can be loaded with a move/shift combination.
6312 // It is not useful otherwise but is implemented for compatibility.
6313 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
6315 } else if (Subtarget->isThumb2()) {
6316 // A constant whose bitwise inverse can be used as an immediate
6317 // value in a data-processing instruction. This can be used in GCC
6318 // with a "B" modifier that prints the inverted value, for use with
6319 // BIC and MVN instructions. It is not useful otherwise but is
6320 // implemented for compatibility.
6321 if (ARM_AM::getT2SOImmVal(~CVal) != -1)
6324 // A constant whose bitwise inverse can be used as an immediate
6325 // value in a data-processing instruction. This can be used in GCC
6326 // with a "B" modifier that prints the inverted value, for use with
6327 // BIC and MVN instructions. It is not useful otherwise but is
6328 // implemented for compatibility.
6329 if (ARM_AM::getSOImmVal(~CVal) != -1)
6335 if (Subtarget->isThumb1Only()) {
6336 // This must be a constant between -7 and 7,
6337 // for 3-operand ADD/SUB immediate instructions.
6338 if (CVal >= -7 && CVal < 7)
6340 } else if (Subtarget->isThumb2()) {
6341 // A constant whose negation can be used as an immediate value in a
6342 // data-processing instruction. This can be used in GCC with an "n"
6343 // modifier that prints the negated value, for use with SUB
6344 // instructions. It is not useful otherwise but is implemented for
6346 if (ARM_AM::getT2SOImmVal(-CVal) != -1)
6349 // A constant whose negation can be used as an immediate value in a
6350 // data-processing instruction. This can be used in GCC with an "n"
6351 // modifier that prints the negated value, for use with SUB
6352 // instructions. It is not useful otherwise but is implemented for
6354 if (ARM_AM::getSOImmVal(-CVal) != -1)
6360 if (Subtarget->isThumb()) { // FIXME thumb2
6361 // This must be a multiple of 4 between 0 and 1020, for
6362 // ADD sp + immediate.
6363 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
6366 // A power of two or a constant between 0 and 32. This is used in
6367 // GCC for the shift amount on shifted register operands, but it is
6368 // useful in general for any shift amounts.
6369 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
6375 if (Subtarget->isThumb()) { // FIXME thumb2
6376 // This must be a constant between 0 and 31, for shift amounts.
6377 if (CVal >= 0 && CVal <= 31)
6383 if (Subtarget->isThumb()) { // FIXME thumb2
6384 // This must be a multiple of 4 between -508 and 508, for
6385 // ADD/SUB sp = sp + immediate.
6386 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
6391 Result = DAG.getTargetConstant(CVal, Op.getValueType());
6395 if (Result.getNode()) {
6396 Ops.push_back(Result);
6399 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
6403 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
6404 // The ARM target isn't yet aware of offsets.
6408 int ARM::getVFPf32Imm(const APFloat &FPImm) {
6409 APInt Imm = FPImm.bitcastToAPInt();
6410 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
6411 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
6412 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
6414 // We can handle 4 bits of mantissa.
6415 // mantissa = (16+UInt(e:f:g:h))/16.
6416 if (Mantissa & 0x7ffff)
6419 if ((Mantissa & 0xf) != Mantissa)
6422 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
6423 if (Exp < -3 || Exp > 4)
6425 Exp = ((Exp+3) & 0x7) ^ 4;
6427 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
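// Worked example (illustrative): 1.0f has the bit pattern 0x3f800000, so
// Sign = 0, the unbiased exponent is 0, and the four retained mantissa bits
// are 0. The exponent maps to ((0 + 3) & 0x7) ^ 4 == 7, giving the 8-bit VFP
// immediate (0 << 7) | (7 << 4) | 0 == 0x70, the encoding used for
// "vmov.f32 s0, #1.0".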
6430 int ARM::getVFPf64Imm(const APFloat &FPImm) {
6431 APInt Imm = FPImm.bitcastToAPInt();
6432 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
6433 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
6434 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;
6436 // We can handle 4 bits of mantissa.
6437 // mantissa = (16+UInt(e:f:g:h))/16.
6438 if (Mantissa & 0xffffffffffffLL)
6441 if ((Mantissa & 0xf) != Mantissa)
6444 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
6445 if (Exp < -3 || Exp > 4)
6447 Exp = ((Exp+3) & 0x7) ^ 4;
6449 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
6452 bool ARM::isBitFieldInvertedMask(unsigned v) {
6453 if (v == 0xffffffff)
6455 // there can be 1's on either or both "outsides"; all the "inside" bits must be 0's.
6457 unsigned int lsb = 0, msb = 31;
6458 while (v & (1 << msb)) --msb;
6459 while (v & (1 << lsb)) ++lsb;
6460 for (unsigned int i = lsb; i <= msb; ++i) {
6467 /// isFPImmLegal - Returns true if the target can instruction select the
6468 /// specified FP immediate natively. If false, the legalizer will
6469 /// materialize the FP immediate as a load from a constant pool.
6470 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
6471 if (!Subtarget->hasVFP3())
6474 return ARM::getVFPf32Imm(Imm) != -1;
6476 return ARM::getVFPf64Imm(Imm) != -1;
6480 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
6481 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
6482 /// specified in the intrinsic calls.
6483 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
6485 unsigned Intrinsic) const {
6486 switch (Intrinsic) {
6487 case Intrinsic::arm_neon_vld1:
6488 case Intrinsic::arm_neon_vld2:
6489 case Intrinsic::arm_neon_vld3:
6490 case Intrinsic::arm_neon_vld4:
6491 case Intrinsic::arm_neon_vld2lane:
6492 case Intrinsic::arm_neon_vld3lane:
6493 case Intrinsic::arm_neon_vld4lane: {
6494 Info.opc = ISD::INTRINSIC_W_CHAIN;
6495 // Conservatively set memVT to the entire set of vectors loaded.
6496 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
6497 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
6498 Info.ptrVal = I.getArgOperand(0);
6500 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
6501 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
6502 Info.vol = false; // volatile loads with NEON intrinsics not supported
6503 Info.readMem = true;
6504 Info.writeMem = false;
6507 case Intrinsic::arm_neon_vst1:
6508 case Intrinsic::arm_neon_vst2:
6509 case Intrinsic::arm_neon_vst3:
6510 case Intrinsic::arm_neon_vst4:
6511 case Intrinsic::arm_neon_vst2lane:
6512 case Intrinsic::arm_neon_vst3lane:
6513 case Intrinsic::arm_neon_vst4lane: {
6514 Info.opc = ISD::INTRINSIC_VOID;
6515 // Conservatively set memVT to the entire set of vectors stored.
6516 unsigned NumElts = 0;
6517 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
6518 const Type *ArgTy = I.getArgOperand(ArgI)->getType();
6519 if (!ArgTy->isVectorTy())
6521 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
6523 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
6524 Info.ptrVal = I.getArgOperand(0);
6526 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
6527 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
6528 Info.vol = false; // volatile stores with NEON intrinsics not supported
6529 Info.readMem = false;
6530 Info.writeMem = true;