//===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/Target/TargetLowering.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/Triple.h"
18 #include "llvm/CodeGen/Analysis.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineJumpTableInfo.h"
23 #include "llvm/CodeGen/StackMaps.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/GlobalVariable.h"
27 #include "llvm/IR/Mangler.h"
28 #include "llvm/MC/MCAsmInfo.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCExpr.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Target/TargetLoweringObjectFile.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetRegisterInfo.h"
37 #include "llvm/Target/TargetSubtargetInfo.h"
static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);
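// Note added for exposition (not in the original source): because this is an
// ordinary cl::opt, the default can be overridden from the command line of
// any tool linking this file, e.g. roughly `llc -jump-is-expensive ...`, and
// setJumpIsExpensive() below then ignores target-initiated changes once the
// flag has been given explicitly.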
46 /// InitLibcallNames - Set default libcall names.
48 static void InitLibcallNames(const char **Names, const Triple &TT) {
49 Names[RTLIB::SHL_I16] = "__ashlhi3";
50 Names[RTLIB::SHL_I32] = "__ashlsi3";
51 Names[RTLIB::SHL_I64] = "__ashldi3";
52 Names[RTLIB::SHL_I128] = "__ashlti3";
53 Names[RTLIB::SRL_I16] = "__lshrhi3";
54 Names[RTLIB::SRL_I32] = "__lshrsi3";
55 Names[RTLIB::SRL_I64] = "__lshrdi3";
56 Names[RTLIB::SRL_I128] = "__lshrti3";
57 Names[RTLIB::SRA_I16] = "__ashrhi3";
58 Names[RTLIB::SRA_I32] = "__ashrsi3";
59 Names[RTLIB::SRA_I64] = "__ashrdi3";
60 Names[RTLIB::SRA_I128] = "__ashrti3";
61 Names[RTLIB::MUL_I8] = "__mulqi3";
62 Names[RTLIB::MUL_I16] = "__mulhi3";
63 Names[RTLIB::MUL_I32] = "__mulsi3";
64 Names[RTLIB::MUL_I64] = "__muldi3";
65 Names[RTLIB::MUL_I128] = "__multi3";
66 Names[RTLIB::MULO_I32] = "__mulosi4";
67 Names[RTLIB::MULO_I64] = "__mulodi4";
68 Names[RTLIB::MULO_I128] = "__muloti4";
69 Names[RTLIB::SDIV_I8] = "__divqi3";
70 Names[RTLIB::SDIV_I16] = "__divhi3";
71 Names[RTLIB::SDIV_I32] = "__divsi3";
72 Names[RTLIB::SDIV_I64] = "__divdi3";
73 Names[RTLIB::SDIV_I128] = "__divti3";
74 Names[RTLIB::UDIV_I8] = "__udivqi3";
75 Names[RTLIB::UDIV_I16] = "__udivhi3";
76 Names[RTLIB::UDIV_I32] = "__udivsi3";
77 Names[RTLIB::UDIV_I64] = "__udivdi3";
78 Names[RTLIB::UDIV_I128] = "__udivti3";
79 Names[RTLIB::SREM_I8] = "__modqi3";
80 Names[RTLIB::SREM_I16] = "__modhi3";
81 Names[RTLIB::SREM_I32] = "__modsi3";
82 Names[RTLIB::SREM_I64] = "__moddi3";
83 Names[RTLIB::SREM_I128] = "__modti3";
84 Names[RTLIB::UREM_I8] = "__umodqi3";
85 Names[RTLIB::UREM_I16] = "__umodhi3";
86 Names[RTLIB::UREM_I32] = "__umodsi3";
87 Names[RTLIB::UREM_I64] = "__umoddi3";
88 Names[RTLIB::UREM_I128] = "__umodti3";
90 // These are generally not available.
91 Names[RTLIB::SDIVREM_I8] = nullptr;
92 Names[RTLIB::SDIVREM_I16] = nullptr;
93 Names[RTLIB::SDIVREM_I32] = nullptr;
94 Names[RTLIB::SDIVREM_I64] = nullptr;
95 Names[RTLIB::SDIVREM_I128] = nullptr;
96 Names[RTLIB::UDIVREM_I8] = nullptr;
97 Names[RTLIB::UDIVREM_I16] = nullptr;
98 Names[RTLIB::UDIVREM_I32] = nullptr;
99 Names[RTLIB::UDIVREM_I64] = nullptr;
100 Names[RTLIB::UDIVREM_I128] = nullptr;
102 Names[RTLIB::NEG_I32] = "__negsi2";
103 Names[RTLIB::NEG_I64] = "__negdi2";
104 Names[RTLIB::ADD_F32] = "__addsf3";
105 Names[RTLIB::ADD_F64] = "__adddf3";
106 Names[RTLIB::ADD_F80] = "__addxf3";
107 Names[RTLIB::ADD_F128] = "__addtf3";
108 Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
109 Names[RTLIB::SUB_F32] = "__subsf3";
110 Names[RTLIB::SUB_F64] = "__subdf3";
111 Names[RTLIB::SUB_F80] = "__subxf3";
112 Names[RTLIB::SUB_F128] = "__subtf3";
113 Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
114 Names[RTLIB::MUL_F32] = "__mulsf3";
115 Names[RTLIB::MUL_F64] = "__muldf3";
116 Names[RTLIB::MUL_F80] = "__mulxf3";
117 Names[RTLIB::MUL_F128] = "__multf3";
118 Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
119 Names[RTLIB::DIV_F32] = "__divsf3";
120 Names[RTLIB::DIV_F64] = "__divdf3";
121 Names[RTLIB::DIV_F80] = "__divxf3";
122 Names[RTLIB::DIV_F128] = "__divtf3";
123 Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
124 Names[RTLIB::REM_F32] = "fmodf";
125 Names[RTLIB::REM_F64] = "fmod";
126 Names[RTLIB::REM_F80] = "fmodl";
127 Names[RTLIB::REM_F128] = "fmodl";
128 Names[RTLIB::REM_PPCF128] = "fmodl";
129 Names[RTLIB::FMA_F32] = "fmaf";
130 Names[RTLIB::FMA_F64] = "fma";
131 Names[RTLIB::FMA_F80] = "fmal";
132 Names[RTLIB::FMA_F128] = "fmal";
133 Names[RTLIB::FMA_PPCF128] = "fmal";
134 Names[RTLIB::POWI_F32] = "__powisf2";
135 Names[RTLIB::POWI_F64] = "__powidf2";
136 Names[RTLIB::POWI_F80] = "__powixf2";
137 Names[RTLIB::POWI_F128] = "__powitf2";
138 Names[RTLIB::POWI_PPCF128] = "__powitf2";
139 Names[RTLIB::SQRT_F32] = "sqrtf";
140 Names[RTLIB::SQRT_F64] = "sqrt";
141 Names[RTLIB::SQRT_F80] = "sqrtl";
142 Names[RTLIB::SQRT_F128] = "sqrtl";
143 Names[RTLIB::SQRT_PPCF128] = "sqrtl";
144 Names[RTLIB::LOG_F32] = "logf";
145 Names[RTLIB::LOG_F64] = "log";
146 Names[RTLIB::LOG_F80] = "logl";
147 Names[RTLIB::LOG_F128] = "logl";
148 Names[RTLIB::LOG_PPCF128] = "logl";
149 Names[RTLIB::LOG2_F32] = "log2f";
150 Names[RTLIB::LOG2_F64] = "log2";
151 Names[RTLIB::LOG2_F80] = "log2l";
152 Names[RTLIB::LOG2_F128] = "log2l";
153 Names[RTLIB::LOG2_PPCF128] = "log2l";
154 Names[RTLIB::LOG10_F32] = "log10f";
155 Names[RTLIB::LOG10_F64] = "log10";
156 Names[RTLIB::LOG10_F80] = "log10l";
157 Names[RTLIB::LOG10_F128] = "log10l";
158 Names[RTLIB::LOG10_PPCF128] = "log10l";
159 Names[RTLIB::EXP_F32] = "expf";
160 Names[RTLIB::EXP_F64] = "exp";
161 Names[RTLIB::EXP_F80] = "expl";
162 Names[RTLIB::EXP_F128] = "expl";
163 Names[RTLIB::EXP_PPCF128] = "expl";
164 Names[RTLIB::EXP2_F32] = "exp2f";
165 Names[RTLIB::EXP2_F64] = "exp2";
166 Names[RTLIB::EXP2_F80] = "exp2l";
167 Names[RTLIB::EXP2_F128] = "exp2l";
168 Names[RTLIB::EXP2_PPCF128] = "exp2l";
169 Names[RTLIB::SIN_F32] = "sinf";
170 Names[RTLIB::SIN_F64] = "sin";
171 Names[RTLIB::SIN_F80] = "sinl";
172 Names[RTLIB::SIN_F128] = "sinl";
173 Names[RTLIB::SIN_PPCF128] = "sinl";
174 Names[RTLIB::COS_F32] = "cosf";
175 Names[RTLIB::COS_F64] = "cos";
176 Names[RTLIB::COS_F80] = "cosl";
177 Names[RTLIB::COS_F128] = "cosl";
178 Names[RTLIB::COS_PPCF128] = "cosl";
179 Names[RTLIB::POW_F32] = "powf";
180 Names[RTLIB::POW_F64] = "pow";
181 Names[RTLIB::POW_F80] = "powl";
182 Names[RTLIB::POW_F128] = "powl";
183 Names[RTLIB::POW_PPCF128] = "powl";
184 Names[RTLIB::CEIL_F32] = "ceilf";
185 Names[RTLIB::CEIL_F64] = "ceil";
186 Names[RTLIB::CEIL_F80] = "ceill";
187 Names[RTLIB::CEIL_F128] = "ceill";
188 Names[RTLIB::CEIL_PPCF128] = "ceill";
189 Names[RTLIB::TRUNC_F32] = "truncf";
190 Names[RTLIB::TRUNC_F64] = "trunc";
191 Names[RTLIB::TRUNC_F80] = "truncl";
192 Names[RTLIB::TRUNC_F128] = "truncl";
193 Names[RTLIB::TRUNC_PPCF128] = "truncl";
194 Names[RTLIB::RINT_F32] = "rintf";
195 Names[RTLIB::RINT_F64] = "rint";
196 Names[RTLIB::RINT_F80] = "rintl";
197 Names[RTLIB::RINT_F128] = "rintl";
198 Names[RTLIB::RINT_PPCF128] = "rintl";
199 Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
200 Names[RTLIB::NEARBYINT_F64] = "nearbyint";
201 Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
202 Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
203 Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
204 Names[RTLIB::ROUND_F32] = "roundf";
205 Names[RTLIB::ROUND_F64] = "round";
206 Names[RTLIB::ROUND_F80] = "roundl";
207 Names[RTLIB::ROUND_F128] = "roundl";
208 Names[RTLIB::ROUND_PPCF128] = "roundl";
209 Names[RTLIB::FLOOR_F32] = "floorf";
210 Names[RTLIB::FLOOR_F64] = "floor";
211 Names[RTLIB::FLOOR_F80] = "floorl";
212 Names[RTLIB::FLOOR_F128] = "floorl";
213 Names[RTLIB::FLOOR_PPCF128] = "floorl";
214 Names[RTLIB::FMIN_F32] = "fminf";
215 Names[RTLIB::FMIN_F64] = "fmin";
216 Names[RTLIB::FMIN_F80] = "fminl";
217 Names[RTLIB::FMIN_F128] = "fminl";
218 Names[RTLIB::FMIN_PPCF128] = "fminl";
219 Names[RTLIB::FMAX_F32] = "fmaxf";
220 Names[RTLIB::FMAX_F64] = "fmax";
221 Names[RTLIB::FMAX_F80] = "fmaxl";
222 Names[RTLIB::FMAX_F128] = "fmaxl";
223 Names[RTLIB::FMAX_PPCF128] = "fmaxl";
224 Names[RTLIB::ROUND_F32] = "roundf";
225 Names[RTLIB::ROUND_F64] = "round";
226 Names[RTLIB::ROUND_F80] = "roundl";
227 Names[RTLIB::ROUND_F128] = "roundl";
228 Names[RTLIB::ROUND_PPCF128] = "roundl";
229 Names[RTLIB::COPYSIGN_F32] = "copysignf";
230 Names[RTLIB::COPYSIGN_F64] = "copysign";
231 Names[RTLIB::COPYSIGN_F80] = "copysignl";
232 Names[RTLIB::COPYSIGN_F128] = "copysignl";
233 Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
234 Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
235 Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
236 Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
237 Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
238 Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
239 Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
240 Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
241 Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
242 Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
243 Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
244 Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
245 Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
246 Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
247 Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
248 Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
249 Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
250 Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
251 Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
252 Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
253 Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
254 Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
255 Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
256 Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
257 Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
258 Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
259 Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
260 Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
261 Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
262 Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
263 Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
264 Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
265 Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
266 Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
267 Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
268 Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
269 Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
270 Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
271 Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
272 Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
273 Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
274 Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
275 Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
276 Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
277 Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
278 Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
279 Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
280 Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
281 Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
282 Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
283 Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
284 Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
285 Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
286 Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
287 Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
288 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
289 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
290 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
291 Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
292 Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
293 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
294 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
295 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
296 Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
297 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
298 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
299 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
300 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
301 Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
302 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
303 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
304 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
305 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
306 Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
307 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
308 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
309 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
310 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
311 Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
312 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
313 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
314 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
315 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
316 Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
317 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
318 Names[RTLIB::OEQ_F32] = "__eqsf2";
319 Names[RTLIB::OEQ_F64] = "__eqdf2";
320 Names[RTLIB::OEQ_F128] = "__eqtf2";
321 Names[RTLIB::UNE_F32] = "__nesf2";
322 Names[RTLIB::UNE_F64] = "__nedf2";
323 Names[RTLIB::UNE_F128] = "__netf2";
324 Names[RTLIB::OGE_F32] = "__gesf2";
325 Names[RTLIB::OGE_F64] = "__gedf2";
326 Names[RTLIB::OGE_F128] = "__getf2";
327 Names[RTLIB::OLT_F32] = "__ltsf2";
328 Names[RTLIB::OLT_F64] = "__ltdf2";
329 Names[RTLIB::OLT_F128] = "__lttf2";
330 Names[RTLIB::OLE_F32] = "__lesf2";
331 Names[RTLIB::OLE_F64] = "__ledf2";
332 Names[RTLIB::OLE_F128] = "__letf2";
333 Names[RTLIB::OGT_F32] = "__gtsf2";
334 Names[RTLIB::OGT_F64] = "__gtdf2";
335 Names[RTLIB::OGT_F128] = "__gttf2";
336 Names[RTLIB::UO_F32] = "__unordsf2";
337 Names[RTLIB::UO_F64] = "__unorddf2";
338 Names[RTLIB::UO_F128] = "__unordtf2";
339 Names[RTLIB::O_F32] = "__unordsf2";
340 Names[RTLIB::O_F64] = "__unorddf2";
341 Names[RTLIB::O_F128] = "__unordtf2";
342 Names[RTLIB::MEMCPY] = "memcpy";
343 Names[RTLIB::MEMMOVE] = "memmove";
344 Names[RTLIB::MEMSET] = "memset";
345 Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
346 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
347 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
348 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
349 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
350 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = "__sync_val_compare_and_swap_16";
351 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
352 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
353 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
354 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
355 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = "__sync_lock_test_and_set_16";
356 Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
357 Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
358 Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
359 Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
360 Names[RTLIB::SYNC_FETCH_AND_ADD_16] = "__sync_fetch_and_add_16";
361 Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
362 Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
363 Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
364 Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
365 Names[RTLIB::SYNC_FETCH_AND_SUB_16] = "__sync_fetch_and_sub_16";
366 Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
367 Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
368 Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
369 Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
370 Names[RTLIB::SYNC_FETCH_AND_AND_16] = "__sync_fetch_and_and_16";
371 Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
372 Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
373 Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
374 Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
375 Names[RTLIB::SYNC_FETCH_AND_OR_16] = "__sync_fetch_and_or_16";
376 Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
377 Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
378 Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
379 Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
380 Names[RTLIB::SYNC_FETCH_AND_XOR_16] = "__sync_fetch_and_xor_16";
381 Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
382 Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
383 Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
384 Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
385 Names[RTLIB::SYNC_FETCH_AND_NAND_16] = "__sync_fetch_and_nand_16";
386 Names[RTLIB::SYNC_FETCH_AND_MAX_1] = "__sync_fetch_and_max_1";
387 Names[RTLIB::SYNC_FETCH_AND_MAX_2] = "__sync_fetch_and_max_2";
388 Names[RTLIB::SYNC_FETCH_AND_MAX_4] = "__sync_fetch_and_max_4";
389 Names[RTLIB::SYNC_FETCH_AND_MAX_8] = "__sync_fetch_and_max_8";
390 Names[RTLIB::SYNC_FETCH_AND_MAX_16] = "__sync_fetch_and_max_16";
391 Names[RTLIB::SYNC_FETCH_AND_UMAX_1] = "__sync_fetch_and_umax_1";
392 Names[RTLIB::SYNC_FETCH_AND_UMAX_2] = "__sync_fetch_and_umax_2";
393 Names[RTLIB::SYNC_FETCH_AND_UMAX_4] = "__sync_fetch_and_umax_4";
394 Names[RTLIB::SYNC_FETCH_AND_UMAX_8] = "__sync_fetch_and_umax_8";
395 Names[RTLIB::SYNC_FETCH_AND_UMAX_16] = "__sync_fetch_and_umax_16";
396 Names[RTLIB::SYNC_FETCH_AND_MIN_1] = "__sync_fetch_and_min_1";
397 Names[RTLIB::SYNC_FETCH_AND_MIN_2] = "__sync_fetch_and_min_2";
398 Names[RTLIB::SYNC_FETCH_AND_MIN_4] = "__sync_fetch_and_min_4";
399 Names[RTLIB::SYNC_FETCH_AND_MIN_8] = "__sync_fetch_and_min_8";
400 Names[RTLIB::SYNC_FETCH_AND_MIN_16] = "__sync_fetch_and_min_16";
401 Names[RTLIB::SYNC_FETCH_AND_UMIN_1] = "__sync_fetch_and_umin_1";
402 Names[RTLIB::SYNC_FETCH_AND_UMIN_2] = "__sync_fetch_and_umin_2";
403 Names[RTLIB::SYNC_FETCH_AND_UMIN_4] = "__sync_fetch_and_umin_4";
404 Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
405 Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";
407 if (TT.getEnvironment() == Triple::GNU) {
408 Names[RTLIB::SINCOS_F32] = "sincosf";
409 Names[RTLIB::SINCOS_F64] = "sincos";
410 Names[RTLIB::SINCOS_F80] = "sincosl";
411 Names[RTLIB::SINCOS_F128] = "sincosl";
412 Names[RTLIB::SINCOS_PPCF128] = "sincosl";
  } else {
    // These are generally not available.
415 Names[RTLIB::SINCOS_F32] = nullptr;
416 Names[RTLIB::SINCOS_F64] = nullptr;
417 Names[RTLIB::SINCOS_F80] = nullptr;
418 Names[RTLIB::SINCOS_F128] = nullptr;
    Names[RTLIB::SINCOS_PPCF128] = nullptr;
  }
422 if (!TT.isOSOpenBSD()) {
423 Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
  } else {
    // These are generally not available.
    Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
  }
429 // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
430 // of the gnueabi-style __gnu_*_ieee.
431 // FIXME: What about other targets?
432 if (TT.isOSDarwin()) {
433 Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
    Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
  }
}
438 /// InitLibcallCallingConvs - Set default libcall CallingConvs.
440 static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
441 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
    CCs[i] = CallingConv::C;
  }
}
446 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
447 /// UNKNOWN_LIBCALL if there is none.
448 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
449 if (OpVT == MVT::f16) {
450 if (RetVT == MVT::f32)
451 return FPEXT_F16_F32;
452 } else if (OpVT == MVT::f32) {
453 if (RetVT == MVT::f64)
454 return FPEXT_F32_F64;
455 if (RetVT == MVT::f128)
456 return FPEXT_F32_F128;
457 } else if (OpVT == MVT::f64) {
458 if (RetVT == MVT::f128)
459 return FPEXT_F64_F128;
  }

  return UNKNOWN_LIBCALL;
}
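// Usage sketch (illustrative, not part of the original file): a caller that
// needs to soften an f32 -> f64 extension would do roughly
//   RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f32, MVT::f64); // FPEXT_F32_F64
// and then map LC to its symbol ("__extendsfdf2" in the table above) via
// TargetLoweringBase::getLibcallName(LC).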
465 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
466 /// UNKNOWN_LIBCALL if there is none.
467 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
468 if (RetVT == MVT::f16) {
469 if (OpVT == MVT::f32)
470 return FPROUND_F32_F16;
471 if (OpVT == MVT::f64)
472 return FPROUND_F64_F16;
473 if (OpVT == MVT::f80)
474 return FPROUND_F80_F16;
475 if (OpVT == MVT::f128)
476 return FPROUND_F128_F16;
477 if (OpVT == MVT::ppcf128)
478 return FPROUND_PPCF128_F16;
479 } else if (RetVT == MVT::f32) {
480 if (OpVT == MVT::f64)
481 return FPROUND_F64_F32;
482 if (OpVT == MVT::f80)
483 return FPROUND_F80_F32;
484 if (OpVT == MVT::f128)
485 return FPROUND_F128_F32;
486 if (OpVT == MVT::ppcf128)
487 return FPROUND_PPCF128_F32;
488 } else if (RetVT == MVT::f64) {
489 if (OpVT == MVT::f80)
490 return FPROUND_F80_F64;
491 if (OpVT == MVT::f128)
492 return FPROUND_F128_F64;
493 if (OpVT == MVT::ppcf128)
494 return FPROUND_PPCF128_F64;
  }

  return UNKNOWN_LIBCALL;
}
500 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
501 /// UNKNOWN_LIBCALL if there is none.
502 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
503 if (OpVT == MVT::f32) {
504 if (RetVT == MVT::i8)
505 return FPTOSINT_F32_I8;
506 if (RetVT == MVT::i16)
507 return FPTOSINT_F32_I16;
508 if (RetVT == MVT::i32)
509 return FPTOSINT_F32_I32;
510 if (RetVT == MVT::i64)
511 return FPTOSINT_F32_I64;
512 if (RetVT == MVT::i128)
513 return FPTOSINT_F32_I128;
514 } else if (OpVT == MVT::f64) {
515 if (RetVT == MVT::i8)
516 return FPTOSINT_F64_I8;
517 if (RetVT == MVT::i16)
518 return FPTOSINT_F64_I16;
519 if (RetVT == MVT::i32)
520 return FPTOSINT_F64_I32;
521 if (RetVT == MVT::i64)
522 return FPTOSINT_F64_I64;
523 if (RetVT == MVT::i128)
524 return FPTOSINT_F64_I128;
525 } else if (OpVT == MVT::f80) {
526 if (RetVT == MVT::i32)
527 return FPTOSINT_F80_I32;
528 if (RetVT == MVT::i64)
529 return FPTOSINT_F80_I64;
530 if (RetVT == MVT::i128)
531 return FPTOSINT_F80_I128;
532 } else if (OpVT == MVT::f128) {
533 if (RetVT == MVT::i32)
534 return FPTOSINT_F128_I32;
535 if (RetVT == MVT::i64)
536 return FPTOSINT_F128_I64;
537 if (RetVT == MVT::i128)
538 return FPTOSINT_F128_I128;
539 } else if (OpVT == MVT::ppcf128) {
540 if (RetVT == MVT::i32)
541 return FPTOSINT_PPCF128_I32;
542 if (RetVT == MVT::i64)
543 return FPTOSINT_PPCF128_I64;
544 if (RetVT == MVT::i128)
545 return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}
550 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
551 /// UNKNOWN_LIBCALL if there is none.
552 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
553 if (OpVT == MVT::f32) {
554 if (RetVT == MVT::i8)
555 return FPTOUINT_F32_I8;
556 if (RetVT == MVT::i16)
557 return FPTOUINT_F32_I16;
558 if (RetVT == MVT::i32)
559 return FPTOUINT_F32_I32;
560 if (RetVT == MVT::i64)
561 return FPTOUINT_F32_I64;
562 if (RetVT == MVT::i128)
563 return FPTOUINT_F32_I128;
564 } else if (OpVT == MVT::f64) {
565 if (RetVT == MVT::i8)
566 return FPTOUINT_F64_I8;
567 if (RetVT == MVT::i16)
568 return FPTOUINT_F64_I16;
569 if (RetVT == MVT::i32)
570 return FPTOUINT_F64_I32;
571 if (RetVT == MVT::i64)
572 return FPTOUINT_F64_I64;
573 if (RetVT == MVT::i128)
574 return FPTOUINT_F64_I128;
575 } else if (OpVT == MVT::f80) {
576 if (RetVT == MVT::i32)
577 return FPTOUINT_F80_I32;
578 if (RetVT == MVT::i64)
579 return FPTOUINT_F80_I64;
580 if (RetVT == MVT::i128)
581 return FPTOUINT_F80_I128;
582 } else if (OpVT == MVT::f128) {
583 if (RetVT == MVT::i32)
584 return FPTOUINT_F128_I32;
585 if (RetVT == MVT::i64)
586 return FPTOUINT_F128_I64;
587 if (RetVT == MVT::i128)
588 return FPTOUINT_F128_I128;
589 } else if (OpVT == MVT::ppcf128) {
590 if (RetVT == MVT::i32)
591 return FPTOUINT_PPCF128_I32;
592 if (RetVT == MVT::i64)
593 return FPTOUINT_PPCF128_I64;
594 if (RetVT == MVT::i128)
595 return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}
600 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
601 /// UNKNOWN_LIBCALL if there is none.
602 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
603 if (OpVT == MVT::i32) {
604 if (RetVT == MVT::f32)
605 return SINTTOFP_I32_F32;
606 if (RetVT == MVT::f64)
607 return SINTTOFP_I32_F64;
608 if (RetVT == MVT::f80)
609 return SINTTOFP_I32_F80;
610 if (RetVT == MVT::f128)
611 return SINTTOFP_I32_F128;
612 if (RetVT == MVT::ppcf128)
613 return SINTTOFP_I32_PPCF128;
614 } else if (OpVT == MVT::i64) {
615 if (RetVT == MVT::f32)
616 return SINTTOFP_I64_F32;
617 if (RetVT == MVT::f64)
618 return SINTTOFP_I64_F64;
619 if (RetVT == MVT::f80)
620 return SINTTOFP_I64_F80;
621 if (RetVT == MVT::f128)
622 return SINTTOFP_I64_F128;
623 if (RetVT == MVT::ppcf128)
624 return SINTTOFP_I64_PPCF128;
625 } else if (OpVT == MVT::i128) {
626 if (RetVT == MVT::f32)
627 return SINTTOFP_I128_F32;
628 if (RetVT == MVT::f64)
629 return SINTTOFP_I128_F64;
630 if (RetVT == MVT::f80)
631 return SINTTOFP_I128_F80;
632 if (RetVT == MVT::f128)
633 return SINTTOFP_I128_F128;
634 if (RetVT == MVT::ppcf128)
635 return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
640 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
641 /// UNKNOWN_LIBCALL if there is none.
642 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
643 if (OpVT == MVT::i32) {
644 if (RetVT == MVT::f32)
645 return UINTTOFP_I32_F32;
646 if (RetVT == MVT::f64)
647 return UINTTOFP_I32_F64;
648 if (RetVT == MVT::f80)
649 return UINTTOFP_I32_F80;
650 if (RetVT == MVT::f128)
651 return UINTTOFP_I32_F128;
652 if (RetVT == MVT::ppcf128)
653 return UINTTOFP_I32_PPCF128;
654 } else if (OpVT == MVT::i64) {
655 if (RetVT == MVT::f32)
656 return UINTTOFP_I64_F32;
657 if (RetVT == MVT::f64)
658 return UINTTOFP_I64_F64;
659 if (RetVT == MVT::f80)
660 return UINTTOFP_I64_F80;
661 if (RetVT == MVT::f128)
662 return UINTTOFP_I64_F128;
663 if (RetVT == MVT::ppcf128)
664 return UINTTOFP_I64_PPCF128;
665 } else if (OpVT == MVT::i128) {
666 if (RetVT == MVT::f32)
667 return UINTTOFP_I128_F32;
668 if (RetVT == MVT::f64)
669 return UINTTOFP_I128_F64;
670 if (RetVT == MVT::f80)
671 return UINTTOFP_I128_F80;
672 if (RetVT == MVT::f128)
673 return UINTTOFP_I128_F128;
674 if (RetVT == MVT::ppcf128)
675 return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
680 RTLIB::Libcall RTLIB::getATOMIC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                     \
  case Name:                                          \
    switch (VT.SimpleTy) {                            \
    default:                                          \
      return UNKNOWN_LIBCALL;                         \
    case MVT::i8:                                     \
      return Enum##_1;                                \
    case MVT::i16:                                    \
      return Enum##_2;                                \
    case MVT::i32:                                    \
      return Enum##_4;                                \
    case MVT::i64:                                    \
      return Enum##_8;                                \
    case MVT::i128:                                   \
      return Enum##_16;                               \
    }

  switch (Opc) {
699 OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
700 OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
701 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
702 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
703 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
704 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
705 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
706 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
707 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
708 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
709 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
710 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
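// Worked example (added for exposition): for an i32 atomic add,
//   RTLIB::getATOMIC(ISD::ATOMIC_LOAD_ADD, MVT::i32)
// hits the ATOMIC_LOAD_ADD case expanded by OP_TO_LIBCALL and returns
// SYNC_FETCH_AND_ADD_4, whose default name above is "__sync_fetch_and_add_4".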
718 /// InitCmpLibcallCCs - Set default comparison libcall CC.
720 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
721 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
722 CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
723 CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
724 CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
725 CCs[RTLIB::UNE_F32] = ISD::SETNE;
726 CCs[RTLIB::UNE_F64] = ISD::SETNE;
727 CCs[RTLIB::UNE_F128] = ISD::SETNE;
728 CCs[RTLIB::OGE_F32] = ISD::SETGE;
729 CCs[RTLIB::OGE_F64] = ISD::SETGE;
730 CCs[RTLIB::OGE_F128] = ISD::SETGE;
731 CCs[RTLIB::OLT_F32] = ISD::SETLT;
732 CCs[RTLIB::OLT_F64] = ISD::SETLT;
733 CCs[RTLIB::OLT_F128] = ISD::SETLT;
734 CCs[RTLIB::OLE_F32] = ISD::SETLE;
735 CCs[RTLIB::OLE_F64] = ISD::SETLE;
736 CCs[RTLIB::OLE_F128] = ISD::SETLE;
737 CCs[RTLIB::OGT_F32] = ISD::SETGT;
738 CCs[RTLIB::OGT_F64] = ISD::SETGT;
739 CCs[RTLIB::OGT_F128] = ISD::SETGT;
740 CCs[RTLIB::UO_F32] = ISD::SETNE;
741 CCs[RTLIB::UO_F64] = ISD::SETNE;
742 CCs[RTLIB::UO_F128] = ISD::SETNE;
743 CCs[RTLIB::O_F32] = ISD::SETEQ;
744 CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
}
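// Explanatory note (not in the original): the soft-float comparison libcalls
// return an int whose relation to zero encodes the result, and CmpLibcallCCs
// records that relation. For example OGE_F32 maps to "__gesf2" with
// ISD::SETGE, so the lowered comparison tests the call's result >= 0.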
748 /// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();
752 // Perform these initializations only once.
753 MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
754 MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
755 = MaxStoresPerMemmoveOptSize = 4;
756 UseUnderscoreSetJmp = false;
757 UseUnderscoreLongJmp = false;
758 SelectIsExpensive = false;
759 HasMultipleConditionRegisters = false;
760 HasExtractBitsInsn = false;
761 IntDivIsCheap = false;
762 FsqrtIsCheap = false;
763 Pow2SDivIsCheap = false;
764 JumpIsExpensive = JumpIsExpensiveOverride;
765 PredictableSelectIsExpensive = false;
766 MaskAndBranchFoldingIsLegal = false;
767 EnableExtLdPromotion = false;
768 HasFloatingPointExceptions = true;
769 StackPointerRegisterToSaveRestore = 0;
770 ExceptionPointerRegister = 0;
771 ExceptionSelectorRegister = 0;
772 BooleanContents = UndefinedBooleanContent;
773 BooleanFloatContents = UndefinedBooleanContent;
774 BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
777 JumpBufAlignment = 0;
778 MinFunctionAlignment = 0;
779 PrefFunctionAlignment = 0;
780 PrefLoopAlignment = 0;
781 MinStackArgumentAlignment = 1;
782 InsertFencesForAtomic = false;
783 MinimumJumpTableEntries = 4;
785 InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
786 InitCmpLibcallCCs(CmpLibcallCCs);
  InitLibcallCallingConvs(LibcallCallingConvs);
}
790 void TargetLoweringBase::initActions() {
791 // All operations default to being supported.
792 memset(OpActions, 0, sizeof(OpActions));
793 memset(LoadExtActions, 0, sizeof(LoadExtActions));
794 memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
795 memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
796 memset(CondCodeActions, 0, sizeof(CondCodeActions));
797 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
798 memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
800 // Set default actions for various operations.
801 for (MVT VT : MVT::all_valuetypes()) {
802 // Default all indexed load / store to expand.
803 for (unsigned IM = (unsigned)ISD::PRE_INC;
804 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
805 setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
    }
809 // Most backends expect to see the node which just returns the value loaded.
810 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
812 // These operations default to expand.
813 setOperationAction(ISD::FGETSIGN, VT, Expand);
814 setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
815 setOperationAction(ISD::FMINNUM, VT, Expand);
816 setOperationAction(ISD::FMAXNUM, VT, Expand);
817 setOperationAction(ISD::FMINNAN, VT, Expand);
818 setOperationAction(ISD::FMAXNAN, VT, Expand);
819 setOperationAction(ISD::FMAD, VT, Expand);
820 setOperationAction(ISD::SMIN, VT, Expand);
821 setOperationAction(ISD::SMAX, VT, Expand);
822 setOperationAction(ISD::UMIN, VT, Expand);
823 setOperationAction(ISD::UMAX, VT, Expand);
825 // Overflow operations default to expand
826 setOperationAction(ISD::SADDO, VT, Expand);
827 setOperationAction(ISD::SSUBO, VT, Expand);
828 setOperationAction(ISD::UADDO, VT, Expand);
829 setOperationAction(ISD::USUBO, VT, Expand);
830 setOperationAction(ISD::SMULO, VT, Expand);
831 setOperationAction(ISD::UMULO, VT, Expand);
832 setOperationAction(ISD::UABSDIFF, VT, Expand);
833 setOperationAction(ISD::SABSDIFF, VT, Expand);
835 // These library functions default to expand.
836 setOperationAction(ISD::FROUND, VT, Expand);
    // These operations default to expand for vector types.
    if (VT.isVector()) {
840 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
841 setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
842 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
    }
  }
847 // Most targets ignore the @llvm.prefetch intrinsic.
848 setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
850 // ConstantFP nodes default to expand. Targets can either change this to
851 // Legal, in which case all fp constants are legal, or use isFPImmLegal()
852 // to optimize expansions for certain constants.
853 setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
854 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
855 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
856 setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
857 setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
859 // These library functions default to expand.
860 for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
861 setOperationAction(ISD::FLOG , VT, Expand);
862 setOperationAction(ISD::FLOG2, VT, Expand);
863 setOperationAction(ISD::FLOG10, VT, Expand);
864 setOperationAction(ISD::FEXP , VT, Expand);
865 setOperationAction(ISD::FEXP2, VT, Expand);
866 setOperationAction(ISD::FFLOOR, VT, Expand);
867 setOperationAction(ISD::FMINNUM, VT, Expand);
868 setOperationAction(ISD::FMAXNUM, VT, Expand);
869 setOperationAction(ISD::FNEARBYINT, VT, Expand);
870 setOperationAction(ISD::FCEIL, VT, Expand);
871 setOperationAction(ISD::FRINT, VT, Expand);
872 setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
  }
876 // Default ISD::TRAP to expand (which turns it into abort).
877 setOperationAction(ISD::TRAP, MVT::Other, Expand);
879 // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
880 // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
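// Sketch of how a backend refines these defaults (illustrative only; the real
// calls live in each target's own TargetLowering constructor):
//   setOperationAction(ISD::FMINNUM, MVT::f32, Legal);   // ISA has fmin
//   setOperationAction(ISD::CTPOP,   MVT::i32, Promote); // use a wider type
// Anything left as Expand is later broken apart or turned into a libcall by
// the SelectionDAG legalizer.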
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}
890 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
891 const DataLayout &DL) const {
892 assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return getScalarShiftAmountTy(DL, LHSTy);
}
898 /// canOpTrap - Returns true if the operation can trap for the value type.
899 /// VT must be a legal type.
900 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}
915 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
916 // If the command-line option was specified, ignore this request.
917 if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}
921 TargetLoweringBase::LegalizeKind
922 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
925 MVT SVT = VT.getSimpleVT();
926 assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
927 MVT NVT = TransformToType[SVT.SimpleTy];
928 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
930 assert((LA == TypeLegal || LA == TypeSoftenFloat ||
931 ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
932 "Promote may not follow Expand or Promote");
934 if (LA == TypeSplitVector)
935 return LegalizeKind(LA,
936 EVT::getVectorVT(Context, SVT.getVectorElementType(),
937 SVT.getVectorNumElements() / 2));
938 if (LA == TypeScalarizeVector)
939 return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }
943 // Handle Extended Scalar Types.
944 if (!VT.isVector()) {
945 assert(VT.isInteger() && "Float types must be simple");
946 unsigned BitSize = VT.getSizeInBits();
947 // First promote to a power-of-two size, then expand if necessary.
948 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
949 EVT NVT = VT.getRoundIntegerType(Context);
950 assert(NVT != VT && "Unable to round integer VT");
951 LegalizeKind NextStep = getTypeConversion(Context, NVT);
952 // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }
959 return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }
963 // Handle vector types.
964 unsigned NumElts = VT.getVectorNumElements();
965 EVT EltVT = VT.getVectorElementType();
  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);
971 // Try to widen vector elements until the element type is a power of two and
972 // promote it to a legal type later on, for example:
973 // <3 x i8> -> <4 x i8> -> <4 x i32>
974 if (EltVT.isInteger()) {
975 // Vectors with a number of elements that is not a power of two are always
976 // widened, for example <3 x i8> -> <4 x i8>.
977 if (!VT.isPow2VectorType()) {
978 NumElts = (unsigned)NextPowerOf2(NumElts);
979 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }
983 // Examine the element type.
984 LegalizeKind LK = getTypeConversion(Context, EltVT);
986 // If type is to be expanded, split the vector.
987 // <4 x i140> -> <2 x i140>
988 if (LK.first == TypeExpandInteger)
989 return LegalizeKind(TypeSplitVector,
990 EVT::getVectorVT(Context, EltVT, NumElts / 2));
992 // Promote the integer element types until a legal vector type is found
993 // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (1) {
998 // Increase the bitwidth of the element to the next pow-of-two
999 // (which is greater than 8 bits).
1000 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
1001 .getRoundIntegerType(Context);
1003 // Stop trying when getting a non-simple element type.
1004 // Note that vector elements may be greater than legal vector element
1005 // types. Example: X86 XMM registers hold 64bit element on 32bit
      if (!EltVT.isSimple())
        break;
1010 // Build a new vector type and check if it is legal.
1011 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1012 // Found a legal promoted vector type.
1013 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1014 return LegalizeKind(TypePromoteInteger,
                              EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }
1023 // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (1) {
1026 // Round up to the next power of 2.
1027 NumElts = (unsigned)NextPowerOf2(NumElts);
1029 // If there is no simple vector type with this many elements then there
1030 // cannot be a larger legal vector type. Note that this assumes that
1031 // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
1034 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;
1038 // If this type is legal then widen the vector.
1039 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }
1043 // Widen odd vectors to next power of two.
1044 if (!VT.isPow2VectorType()) {
1045 EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }
1049 // Vectors with illegal element types are expanded.
1050 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}
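// Worked example (added for exposition): following the rules above, an
// extended type such as <3 x i8> is first widened to the power-of-two type
// <4 x i8> (TypeWidenVector); querying <4 x i8> may then yield
// TypePromoteInteger to something like <4 x i32> if that is the first legal
// vector with four integer elements, matching the <3 x i8> example in the
// comments.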
1054 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
1058 // Figure out the right, legal destination reg to copy into.
1059 unsigned NumElts = VT.getVectorNumElements();
1060 MVT EltTy = VT.getVectorElementType();
1062 unsigned NumVectorRegs = 1;
1064 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1065 // could break down into LHS/RHS like LegalizeDAG does.
1066 if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }
1071 // Divide the input until we get to a supported size. This will always
1072 // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }
1078 NumIntermediates = NumVectorRegs;
1080 MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
1083 IntermediateVT = NewVT;
1085 unsigned NewVTSize = NewVT.getSizeInBits();
1087 // Convert sizes such as i33 to i64.
1088 if (!isPowerOf2_32(NewVTSize))
1089 NewVTSize = NextPowerOf2(NewVTSize);
1091 MVT DestVT = TLI->getRegisterType(NewVT);
1092 RegisterVT = DestVT;
1093 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1094 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1096 // Otherwise, promotion or legal types use the same number of registers as
1097 // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
1101 /// isLegalRC - Return true if the value types that can be represented by the
1102 /// specified register class are all legal.
1103 bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
       I != E; ++I) {
    if (isTypeLegal(*I))
      return true;
  }
  return false;
}
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
1116 MachineBasicBlock *MBB) const {
1117 MachineFunction &MF = *MI->getParent()->getParent();
1119 // MI changes inside this loop as we grow operands.
1120 for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;
1125 // foldMemoryOperand builds a new MI after replacing a single FI operand
1126 // with the canonical set of five x86 addressing-mode operands.
1127 int FI = MO.getIndex();
1128 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
1130 // Copy operands before the frame-index.
1131 for (unsigned i = 0; i < OperIdx; ++i)
1132 MIB.addOperand(MI->getOperand(i));
1133 // Add frame index operands: direct-mem-ref tag, #FI, offset.
1134 MIB.addImm(StackMaps::DirectMemRefOp);
    MIB.addOperand(MI->getOperand(OperIdx));
    MIB.addImm(0);
1137 // Copy the operands after the frame index.
1138 for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
1139 MIB.addOperand(MI->getOperand(i));
1141 // Inherit previous memory operands.
1142 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1143 assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1145 // Add a new memory operand for this FI.
1146 const MachineFrameInfo &MFI = *MF.getFrameInfo();
1147 assert(MFI.getObjectOffset(FI) != -1);
1149 unsigned Flags = MachineMemOperand::MOLoad;
1150 if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
1151 Flags |= MachineMemOperand::MOStore;
      Flags |= MachineMemOperand::MOVolatile;
    }
1154 MachineMemOperand *MMO = MF.getMachineMemOperand(
1155 MachinePointerInfo::getFixedStack(FI), Flags,
1156 MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
1157 MIB->addMemOperand(MF, MMO);
1159 // Replace the instruction and update the operand index.
1160 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1161 OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MBB;
}
1168 /// findRepresentativeClass - Return the largest legal super-reg register class
1169 /// of the register class for the specified type and its associated "cost".
1170 // This function is in TargetLowering because it uses RegClassForVT which would
1171 // need to be moved to TargetRegisterInfo and would necessitate moving
1172 // isTypeLegal over as well - a massive change that would just require
1173 // TargetLowering having a TargetRegisterInfo class member that it would use.
1174 std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);
1181 // Compute the set of all super-register classes.
1182 BitVector SuperRegRC(TRI->getNumRegClasses());
1183 for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1184 SuperRegRC.setBitsInMask(RCI.getMask());
1186 // Find the first legal register class with the largest spill size.
1187 const TargetRegisterClass *BestRC = RC;
1188 for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
1189 const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1190 // We want the largest possible spill size.
    if (SuperRC->getSize() <= BestRC->getSize())
      continue;
    if (!isLegalRC(SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}
1200 /// computeRegisterProperties - Once all of the register classes are added,
1201 /// this allows us to compute derived properties we expose.
1202 void TargetLoweringBase::computeRegisterProperties(
1203 const TargetRegisterInfo *TRI) {
1204 static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1205 "Too many value types for ValueTypeActions to hold!");
1207 // Everything defaults to needing one register.
1208 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1209 NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
1212 // ...except isVoid, which doesn't need any registers.
1213 NumRegistersForVT[MVT::isVoid] = 0;
1215 // Find the largest integer register class.
1216 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1217 for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1218 assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1220 // Every integer value type larger than this largest register takes twice as
1221 // many registers to represent as the previous ValueType.
1222 for (unsigned ExpandedReg = LargestIntReg + 1;
1223 ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1224 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1225 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1226 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }
1231 // Inspect all of the ValueType's smaller than the largest integer
1232 // register to see which ones need promotion.
1233 unsigned LegalIntReg = LargestIntReg;
1234 for (unsigned IntReg = LargestIntReg - 1;
1235 IntReg >= (unsigned)MVT::i1; --IntReg) {
1236 MVT IVT = (MVT::SimpleValueType)IntReg;
1237 if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
1240 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1241 (const MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }
1246 // ppcf128 type is really two f64's.
1247 if (!isTypeLegal(MVT::ppcf128)) {
1248 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1249 RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1250 TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
  }
1254 // Decide how to handle f128. If the target does not have native f128 support,
1255 // expand it to i128 and we will be generating soft float library calls.
1256 if (!isTypeLegal(MVT::f128)) {
1257 NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1258 RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1259 TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }
1263 // Decide how to handle f64. If the target does not have native f64 support,
1264 // expand it to i64 and we will be generating soft float library calls.
1265 if (!isTypeLegal(MVT::f64)) {
1266 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1267 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1268 TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }
1272 // Decide how to handle f32. If the target does not have native f32 support,
1273 // expand it to i32 and we will be generating soft float library calls.
1274 if (!isTypeLegal(MVT::f32)) {
1275 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1276 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1277 TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }
1281 if (!isTypeLegal(MVT::f16)) {
1282 // If the target has native f32 support, promote f16 operations to f32. If
1283 // f32 is not supported, generate soft float library calls.
1284 if (isTypeLegal(MVT::f32)) {
1285 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1286 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1287 TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    } else {
1290 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1291 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1292 TransformToType[MVT::f16] = MVT::i16;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
    }
  }
1297 // Loop over all of the vector value types to see which need transformations.
1298 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1299 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1300 MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;
1304 MVT EltVT = VT.getVectorElementType();
1305 unsigned NElts = VT.getVectorNumElements();
1306 bool IsLegalWiderType = false;
1307 LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1308 switch (PreferredAction) {
1309 case TypePromoteInteger: {
1310 // Try to promote the elements of integer vectors. If no legal
1311 // promotion was found, fall through to the widen-vector method.
1312 for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1313 MVT SVT = (MVT::SimpleValueType) nVT;
1314 // Promote vectors of integers to vectors with the same number
1315 // of elements, with a wider element type.
1316 if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
1317 && SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)
1318 && SVT.getScalarType().isInteger()) {
1319 TransformToType[i] = SVT;
1320 RegisterTypeForVT[i] = SVT;
1321 NumRegistersForVT[i] = 1;
1322 ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
    }
1330 case TypeWidenVector: {
1331 // Try to widen the vector.
1332 for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1333 MVT SVT = (MVT::SimpleValueType) nVT;
1334 if (SVT.getVectorElementType() == EltVT
1335 && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1336 TransformToType[i] = SVT;
1337 RegisterTypeForVT[i] = SVT;
1338 NumRegistersForVT[i] = 1;
1339 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
    }
1347 case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
1352 NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1353 NumIntermediates, RegisterVT, this);
1354 RegisterTypeForVT[i] = RegisterVT;
      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
1358 // Type is already a power of 2. The default action is to split.
1359 TransformToType[i] = MVT::Other;
1360 if (PreferredAction == TypeScalarizeVector)
1361 ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1362 else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
1369 TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }
1379 // Determine the 'representative' register class for each value type.
// A representative register class is the largest (meaning one which is
1381 // not a sub-register class / subreg register class) legal register class for
1382 // a group of value types. For example, on i386, i8, i16, and i32
1383 // representative would be GR32; while on x86_64 it's GR64.
1384 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
1387 std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1388 RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}
EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
1395 assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}
1399 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}
1403 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1404 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1405 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1406 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1408 /// This method returns the number of registers needed, and the VT for each
1409 /// register. It also returns the VT and quantity of the intermediate values
1410 /// before they are promoted/expanded.
1412 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1413 EVT &IntermediateVT,
1414 unsigned &NumIntermediates,
1415 MVT &RegisterVT) const {
1416 unsigned NumElts = VT.getVectorNumElements();
1418 // If there is a wider vector type with the same element type as this one,
1419 // or a promoted vector type that has the same number of elements which
1420 // are wider, then we should convert to that legal vector type.
1421 // This handles things like <2 x float> -> <4 x float> and
1422 // <4 x i1> -> <4 x i32>.
1423 LegalizeTypeAction TA = getTypeAction(Context, VT);
1424 if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1425 EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1426 if (isTypeLegal(RegisterEVT)) {
1427 IntermediateVT = RegisterEVT;
1428 RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }
1434 // Figure out the right, legal destination reg to copy into.
1435 EVT EltTy = VT.getVectorElementType();
1437 unsigned NumVectorRegs = 1;
1439 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1440 // could break down into LHS/RHS like LegalizeDAG does.
1441 if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }
1446 // Divide the input until we get to a supported size. This will always
1447 // end with a scalar if the target doesn't support vectors.
1448 while (NumElts > 1 && !isTypeLegal(
                                  EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }
1454 NumIntermediates = NumVectorRegs;
1456 EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
1459 IntermediateVT = NewVT;
1461 MVT DestVT = getRegisterType(Context, NewVT);
1462 RegisterVT = DestVT;
1463 unsigned NewVTSize = NewVT.getSizeInBits();
1465 // Convert sizes such as i33 to i64.
1466 if (!isPowerOf2_32(NewVTSize))
1467 NewVTSize = NextPowerOf2(NewVTSize);
1469 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1470 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1472 // Otherwise, promotion or legal types use the same number of registers as
1473 // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
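// Concrete instance of the doc comment above (illustrative): if only v4f32 is
// legal, calling getVectorTypeBreakdown on v8f32 sets NumIntermediates = 2,
// IntermediateVT = RegisterVT = v4f32, and returns 2 registers in total.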
1477 /// Get the EVTs and ArgFlags collections that represent the legalized return
1478 /// type of the given function. This does not require a DAG or a return value,
1479 /// and is suitable for use before any DAGs for the function are constructed.
1480 /// TODO: Move this out of TargetLowering.cpp.
1481 void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
1482 SmallVectorImpl<ISD::OutputArg> &Outs,
1483 const TargetLowering &TLI, const DataLayout &DL) {
1484 SmallVector<EVT, 4> ValueVTs;
1485 ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1486 unsigned NumValues = ValueVTs.size();
1487 if (NumValues == 0) return;
1489 for (unsigned j = 0, f = NumValues; j != f; ++j) {
1490 EVT VT = ValueVTs[j];
1491 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1493 if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
1494 ExtendKind = ISD::SIGN_EXTEND;
1495 else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
1496 ExtendKind = ISD::ZERO_EXTEND;
1498 // FIXME: C calling convention requires the return type to be promoted to
1499 // at least 32-bit. But this is not necessary for non-C calling
1500 // conventions. The frontend should mark functions whose return values
1501 // require promoting with signext or zeroext attributes.
1502 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1503 MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }
1508 unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
1509 MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}
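// Example of the splitting above (added for exposition): returning an i64
// from a target whose widest legal integer register is i32 gives
// NumParts == 2 and PartVT == MVT::i32, so two ISD::OutputArg entries are
// emitted for the single IR return value.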
1527 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1528 /// function arguments in the caller parameter area. This is the actual
1529 /// alignment, not its logarithm.
1530 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1531 const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}
1535 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
1540 // Check if the specified alignment is sufficient based on the data layout.
1541 // TODO: While using the data layout works in practice, a better solution
1542 // would be to implement this check directly (make this a virtual function).
1543 // For example, the ABI alignment may change based on software platform while
1544 // this function should only be affected by hardware implementation.
1545 Type *Ty = VT.getTypeForEVT(Context);
1546 if (Alignment >= DL.getABITypeAlignment(Ty)) {
1547 // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }
1553 // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}
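// Reading of the default policy (explanatory note): an access whose alignment
// meets the DataLayout ABI alignment for its type, e.g. a 4-byte aligned i32
// load on a typical layout, is reported as allowed and fast; anything below
// that is forwarded to allowsMisalignedMemoryAccesses, whose base
// implementation rejects it unless a target overrides it.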
1558 //===----------------------------------------------------------------------===//
1559 // TargetTransformInfo Helpers
1560 //===----------------------------------------------------------------------===//
1562 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1563 enum InstructionOpcodes {
1564 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1565 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret: return 0;
  case Br: return 0;
1571 case Switch: return 0;
1572 case IndirectBr: return 0;
1573 case Invoke: return 0;
1574 case Resume: return 0;
1575 case Unreachable: return 0;
1576 case CleanupRet: return 0;
1577 case CatchEndPad: return 0;
1578 case CatchRet: return 0;
1579 case CatchPad: return 0;
1580 case TerminatePad: return 0;
1581 case CleanupPad: return 0;
1582 case Add: return ISD::ADD;
1583 case FAdd: return ISD::FADD;
1584 case Sub: return ISD::SUB;
1585 case FSub: return ISD::FSUB;
1586 case Mul: return ISD::MUL;
1587 case FMul: return ISD::FMUL;
1588 case UDiv: return ISD::UDIV;
1589 case SDiv: return ISD::SDIV;
1590 case FDiv: return ISD::FDIV;
1591 case URem: return ISD::UREM;
1592 case SRem: return ISD::SREM;
1593 case FRem: return ISD::FREM;
1594 case Shl: return ISD::SHL;
1595 case LShr: return ISD::SRL;
1596 case AShr: return ISD::SRA;
1597 case And: return ISD::AND;
1598 case Or: return ISD::OR;
1599 case Xor: return ISD::XOR;
1600 case Alloca: return 0;
1601 case Load: return ISD::LOAD;
1602 case Store: return ISD::STORE;
1603 case GetElementPtr: return 0;
1604 case Fence: return 0;
1605 case AtomicCmpXchg: return 0;
1606 case AtomicRMW: return 0;
1607 case Trunc: return ISD::TRUNCATE;
1608 case ZExt: return ISD::ZERO_EXTEND;
1609 case SExt: return ISD::SIGN_EXTEND;
1610 case FPToUI: return ISD::FP_TO_UINT;
1611 case FPToSI: return ISD::FP_TO_SINT;
1612 case UIToFP: return ISD::UINT_TO_FP;
1613 case SIToFP: return ISD::SINT_TO_FP;
1614 case FPTrunc: return ISD::FP_ROUND;
1615 case FPExt: return ISD::FP_EXTEND;
1616 case PtrToInt: return ISD::BITCAST;
1617 case IntToPtr: return ISD::BITCAST;
1618 case BitCast: return ISD::BITCAST;
1619 case AddrSpaceCast: return ISD::ADDRSPACECAST;
1620 case ICmp: return ISD::SETCC;
  case FCmp: return ISD::SETCC;
  case PHI: return 0;
  case Call: return 0;
1624 case Select: return ISD::SELECT;
1625 case UserOp1: return 0;
1626 case UserOp2: return 0;
1627 case VAArg: return 0;
1628 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1629 case InsertElement: return ISD::INSERT_VECTOR_ELT;
1630 case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1631 case ExtractValue: return ISD::MERGE_VALUES;
1632 case InsertValue: return ISD::MERGE_VALUES;
  case LandingPad: return 0;
  }
  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<unsigned, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  unsigned Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
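// Cost intuition (illustrative): if <8 x i32> must be split once into two
// legal <4 x i32> halves, the loop above doubles Cost exactly once and
// returns {2, MVT::v4i32}; an already-legal type costs 1.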
1663 //===----------------------------------------------------------------------===//
1664 // Loop Strength Reduction hooks
1665 //===----------------------------------------------------------------------===//
1667 /// isLegalAddressingMode - Return true if the addressing mode represented
1668 /// by AM is legal for this target, for a load/store of the specified type.
1669 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1670 const AddrMode &AM, Type *Ty,
1671 unsigned AS) const {
  // The default implementation of this implements a conservative RISCy, r+r
  // and r+i addr mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow n * r