//===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SPUTargetLowering class.
//
//===----------------------------------------------------------------------===//
#include "SPURegisterNames.h"
#include "SPUISelLowering.h"
#include "SPUTargetMachine.h"
#include "SPUFrameInfo.h"
#include "SPUMachineFunction.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <map>

using namespace llvm;
// Used in getTargetNodeName() below
std::map<unsigned, const char *> node_names;

//! EVT mapping to useful data for Cell SPU
struct valtype_map_s {
  EVT valtype;
  int prefslot_byte;
};

// Preferred-slot byte offsets for each scalar type within a 16-byte quadword:
const valtype_map_s valtype_map[] = {
  { MVT::i1,   3 },
  { MVT::i8,   3 },
  { MVT::i16,  2 },
  { MVT::i32,  0 },
  { MVT::f32,  0 },
  { MVT::i64,  0 },
  { MVT::f64,  0 },
  { MVT::i128, 0 }
};

const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
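
// For example, an i32 scalar occupies bytes 0..3 of its quadword (the
// "preferred slot"), while an i16 scalar occupies bytes 2..3; hence
// prefslot_byte == 2 for MVT::i16 in the table above.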

const valtype_map_s *getValueTypeMapEntry(EVT VT) {
  const valtype_map_s *retval = 0;

  for (size_t i = 0; i < n_valtype_map; ++i) {
    if (valtype_map[i].valtype == VT) {
      retval = valtype_map + i;
      break;
    }
  }

#ifndef NDEBUG
  if (retval == 0) {
    report_fatal_error("getValueTypeMapEntry returns NULL for " +
                       Twine(VT.getEVTString()));
  }
#endif

  return retval;
}

//! Expand a library call into an actual call DAG node
/*!
 This code is taken from SelectionDAGLegalize, since it is not exposed as
 part of the LLVM SelectionDAG API.
 */
static SDValue
ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
              bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy());

  // Splice the libcall in wherever FindInputOutputChains tells us to.
  const Type *RetTy =
    Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG, Op.getDebugLoc());

  return CallInfo.first;
}
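
// Illustrative note (not from the original source): for a libcall such as
// RTLIB::DIV_F64, remapped below to "__fast_divdf3", this helper builds the
// external-symbol callee and a LowerCallTo call node whose first result is
// the call's return value.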

SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),
    SPUTM(TM) {
  // Signed division by a power of 2 is considered cheap.
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);

  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  // SPU's loads and stores have to be custom lowered:
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setLoadExtAction(ISD::EXTLOAD, VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }

  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }

  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i128, Expand);
  setOperationAction(ISD::UREM, MVT::i128, Expand);
  setOperationAction(ISD::SDIV, MVT::i128, Expand);
  setOperationAction(ISD::UDIV, MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  // where it is available.)
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  // the .td files.
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8, Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);

  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);

  // Custom lower i8 multiplications; i32 and i64 multiplies are legal.
  setOperationAction(ISD::MUL, MVT::i8, Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);

  // Need to custom handle (some) common i8 math ops; i64 add/sub are legal:
  setOperationAction(ISD::ADD, MVT::i8, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);

  // SPU does not have BSWAP, but it does have CTLZ support for i32;
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Expand);

  setOperationAction(ISD::CTTZ , MVT::i8, Expand);
  setOperationAction(ISD::CTTZ , MVT::i16, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i128, Expand);

  setOperationAction(ISD::CTLZ , MVT::i8, Promote);
  setOperationAction(ISD::CTLZ , MVT::i16, Promote);
  setOperationAction(ISD::CTLZ , MVT::i32, Legal);
  setOperationAction(ISD::CTLZ , MVT::i64, Expand);
  setOperationAction(ISD::CTLZ , MVT::i128, Expand);

  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8, Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);

  setOperationAction(ISD::SETCC, MVT::i8, Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);

  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but f64 needs
  // to be expanded into a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);

  // f64 FDIV on SPU has to be expanded into a libcall:
  setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall

  // SPU has [U|S]INT_TO_FP for i32->f32, but not for i32->f64 or i64->f64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);

  // Cell SPU has instructions for converting between i64 and fp.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);

  // "Odd size" vector classes that we're willing to support:
  addRegisterClass(MVT::v2i32, SPU::VECREGRegisterClass);

  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul is legal for all supported vector VT's.
    setOperationAction(ISD::MUL, VT, Legal);

    setOperationAction(ISD::AND, VT, Legal);
    setOperationAction(ISD::OR, VT, Legal);
    setOperationAction(ISD::XOR, VT, Legal);
    setOperationAction(ISD::LOAD, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE, VT, Legal);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  }

  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR, MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);

  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);
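  // i.e., a "true" SETCC result is all ones (-1) in the result type, matching
  // the masks produced by the SPU compare instructions.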

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
}

const char *
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
{
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
      "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
  }

  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);

  return ((i != node_names.end()) ? i->second : 0);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
  return 3;
}

//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//

MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
          VT.getSimpleVT().SimpleTy :
          MVT::i32);
}

//===----------------------------------------------------------------------===//
// Calling convention code:
//===----------------------------------------------------------------------===//

#include "SPUGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Custom lower loads for CellSPU
/*!
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to rotate to extract the requested element.

 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:

\verbatim
%1  v16i8,ch = load
%2  v16i8,ch = rotate %1
%3  v4f32,ch = bitconvert %2
%4  f32 = vec2prefslot %3
%5  f64 = fp_extend %4, ...
\endverbatim
*/
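// Illustrative example (not from the original source): an aligned i16 load
// from byte offset 6 within a quadword computes
//   rotamt = (6 & 0xf) - prefslot_byte(i16) = 6 - 2 = 4,
// so the quadword is rotated left by 4 bytes, leaving the halfword in
// bytes 2..3 -- the preferred slot for i16 scalars.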
static SDValue
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();
  EVT OutVT = Op.getValueType();
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
  DebugLoc dl = Op.getDebugLoc();

  switch (LN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    SDValue result;
    SDValue basePtr = LN->getBasePtr();
    SDValue rotate;

    if (alignment == 16) {
      ConstantSDNode *CN;

      // Special cases for a known aligned load to simplify the base pointer
      // and the rotation amount:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();
        int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);

        if (rotamt < 0)
          rotamt += 16;

        rotate = DAG.getConstant(rotamt, MVT::i16);

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        if ((offset & ~0xf) > 0) {
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                basePtr,
                                DAG.getConstant((offset & ~0xf), PtrVT));
        }
      } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
                 || (basePtr.getOpcode() == SPUISD::IndirectAddr
                     && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                     && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
        // Plain aligned a-form address: rotate into preferred slot
        // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
        int64_t rotamt = -vtm->prefslot_byte;
        if (rotamt < 0)
          rotamt += 16;
        rotate = DAG.getConstant(rotamt, MVT::i16);
      } else {
        // Offset the rotate amount by the basePtr and the preferred slot
        // byte offset
        int64_t rotamt = -vtm->prefslot_byte;
        if (rotamt < 0)
          rotamt += 16;
        rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                             basePtr,
                             DAG.getConstant(rotamt, PtrVT));
      }
    } else {
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
        SDValue Flag;

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        } else {
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        }
      } else {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant(0, PtrVT));
      }

      // Offset the rotate amount by the basePtr and the preferred slot
      // byte offset
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           basePtr,
                           DAG.getConstant(-vtm->prefslot_byte, PtrVT));
    }

    // Re-emit as a v16i8 vector load
    result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                         LN->getSrcValue(), LN->getSrcValueOffset(),
                         LN->isVolatile(), LN->isNonTemporal(), 16);

    // Update the chain
    the_chain = result.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
                         result.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));

    // Handle extending loads by extending the scalar result:
    if (ExtType == ISD::SEXTLOAD) {
      result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::ZEXTLOAD) {
      result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::EXTLOAD) {
      unsigned NewOpc = ISD::ANY_EXTEND;

      if (OutVT.isFloatingPoint())
        NewOpc = ISD::FP_EXTEND;

      result = DAG.getNode(NewOpc, dl, OutVT, result);
    }

    SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
    SDValue retops[2] = {
      result,
      the_chain
    };

    result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                         retops, sizeof(retops) / sizeof(retops[0]));
    return result;
  }
  case ISD::PRE_INC:
  case ISD::PRE_DEC:
  case ISD::POST_INC:
  case ISD::POST_DEC:
  case ISD::LAST_INDEXED_MODE:
    report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
                       "than UNINDEXED\n" +
                       Twine((unsigned)LN->getAddressingMode()));
    /*NOTREACHED*/
  }

  return SDValue();
}

/// Custom lower stores for CellSPU
/*!
 All CellSPU stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to generate a shuffle to insert the
 requested element into its place, then store the resulting block.
 */
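// Illustrative sequence (not from the original source): a scalar store thus
// becomes load-quadword, SHUFFLE_MASK (the insertion control word),
// SCALAR_TO_VECTOR, SHUFB to merge in the new element, and finally a v16i8
// store of the whole quadword.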
static SDValue
LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  StoreSDNode *SN = cast<StoreSDNode>(Op);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();
  EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned alignment = SN->getAlignment();

  switch (SN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    // The vector type we really want to load from the 16-byte chunk.
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 VT, (128 / VT.getSizeInBits()));

    SDValue alignLoadVec;
    SDValue basePtr = SN->getBasePtr();
    SDValue the_chain = SN->getChain();
    SDValue insertEltOffs;

    if (alignment == 16) {
      ConstantSDNode *CN;

      // Special cases for a known aligned load to simplify the base pointer
      // and insertion byte:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                    basePtr,
                                    DAG.getConstant((offset & 0xf), PtrVT));

        if ((offset & ~0xf) > 0) {
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                basePtr,
                                DAG.getConstant((offset & ~0xf), PtrVT));
        }
      } else {
        // Otherwise, assume it's at byte 0 of basePtr
        insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                    basePtr,
                                    DAG.getConstant(0, PtrVT));
      }
    } else {
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
        SDValue Flag;

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        } else {
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        }
      } else {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant(0, PtrVT));
      }

      // Insertion point is solely determined by basePtr's contents
      insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
                                  basePtr,
                                  DAG.getConstant(0, PtrVT));
    }

    // Re-emit as a v16i8 vector load
    alignLoadVec = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                               SN->getSrcValue(), SN->getSrcValueOffset(),
                               SN->isVolatile(), SN->isNonTemporal(), 16);

    // Update the chain
    the_chain = alignLoadVec.getValue(1);

    LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
    SDValue theValue = SN->getValue();
    SDValue result;

    if (StVT != VT
        && (theValue.getOpcode() == ISD::AssertZext
            || theValue.getOpcode() == ISD::AssertSext)) {
      // Drill down and get the value for zero- and sign-extended
      // quantities
      theValue = theValue.getOperand(0);
    }

    // If the base pointer is already a D-form address, then just create
    // a new D-form address with a slot offset and the original base pointer.
    // Otherwise generate a D-form address with the slot offset relative
    // to the stack pointer, which is always aligned.
#ifndef NDEBUG
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      errs() << "CellSPU LowerSTORE: basePtr = ";
      basePtr.getNode()->dump(&DAG);
      errs() << "\n";
    }
#endif

    SDValue insertEltOp =
      DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, insertEltOffs);
    SDValue vectorizeOp =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, theValue);

    result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                         vectorizeOp, alignLoadVec,
                         DAG.getNode(ISD::BIT_CONVERT, dl,
                                     MVT::v4i32, insertEltOp));

    result = DAG.getStore(the_chain, dl, result, basePtr,
                          LN->getSrcValue(), LN->getSrcValueOffset(),
                          LN->isVolatile(), LN->isNonTemporal(),
                          LN->getAlignment());

#if 0 && !defined(NDEBUG)
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      const SDValue &currentRoot = DAG.getRoot();

      DAG.setRoot(result);
      errs() << "------- CellSPU:LowerStore result:\n";
      DAG.dump();
      errs() << "-------\n";
      DAG.setRoot(currentRoot);
    }
#endif

    return result;
  }
  case ISD::PRE_INC:
  case ISD::PRE_DEC:
  case ISD::POST_INC:
  case ISD::POST_DEC:
  case ISD::LAST_INDEXED_MODE:
    report_fatal_error("LowerSTORE: Got a StoreSDNode with an addr mode other "
                       "than UNINDEXED\n" +
                       Twine((unsigned)SN->getAddressingMode()));
    /*NOTREACHED*/
  }

  return SDValue();
}

//! Generate the address of a constant pool entry.
static SDValue
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      // Just return the SDValue with the constant pool address in it.
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerConstantPool: Relocation model other than static"
                   " not supported.");
  return SDValue();
}

//! Alternate entry point for generating the address of a constant pool entry
SDValue
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG,
                       const SPUTargetMachine &TM) {
  return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
}

static SDValue
LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerJumpTable: Relocation model other than static"
                   " not supported.");
  return SDValue();
}

static SDValue
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                          PtrVT, GSDN->getOffset());
  const TargetMachine &TM = DAG.getTarget();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  } else {
    report_fatal_error("LowerGlobalAddress: Relocation model other than static"
                       " not supported.");
    /*NOTREACHED*/
  }

  return SDValue();
}

//! Custom lower double precision floating point constants
static SDValue
LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (VT == MVT::f64) {
    ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());

    assert((FP != 0) &&
           "LowerConstantFP: Node is not ConstantFPSDNode");

    uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
    SDValue T = DAG.getConstant(dbits, MVT::i64);
    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                       DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
  }

  return SDValue();
}

SDValue
SPUTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();

  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  unsigned ArgOffset = SPUFrameInfo::minStackSize();
  unsigned ArgRegIdx = 0;
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Add DAG nodes to load the arguments or copy them out of registers.
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    SDValue ArgVal;

    if (ArgRegIdx < NumArgRegs) {
      const TargetRegisterClass *ArgRegClass;

      switch (ObjectVT.getSimpleVT().SimpleTy) {
      default:
        report_fatal_error("LowerFormalArguments Unhandled argument type: " +
                           Twine(ObjectVT.getEVTString()));
      case MVT::i8:
        ArgRegClass = &SPU::R8CRegClass;
        break;
      case MVT::i16:
        ArgRegClass = &SPU::R16CRegClass;
        break;
      case MVT::i32:
        ArgRegClass = &SPU::R32CRegClass;
        break;
      case MVT::i64:
        ArgRegClass = &SPU::R64CRegClass;
        break;
      case MVT::i128:
        ArgRegClass = &SPU::GPRCRegClass;
        break;
      case MVT::f32:
        ArgRegClass = &SPU::R32FPRegClass;
        break;
      case MVT::f64:
        ArgRegClass = &SPU::R64FPRegClass;
        break;
      case MVT::v2f64:
      case MVT::v4f32:
      case MVT::v2i64:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        ArgRegClass = &SPU::VECREGRegClass;
        break;
      }

      unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
      RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
      ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
      ++ArgRegIdx;
    } else {
      // We need to load the argument to a virtual register if we determined
      // above that we ran out of physical registers of the appropriate type
      // or we're forced to do vararg
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, false, false, 0);
      ArgOffset += StackSlotSize;
    }

    InVals.push_back(ArgVal);
    // update the chain from the last load
    Chain = ArgVal.getOperand(0);
  }

  // vararg handling:
  if (isVarArg) {
    // unsigned int ptr_size = PtrVT.getSizeInBits() / 8;
    // We will spill (79-3)+1 registers to the stack
    SmallVector<SDValue, 79-3+1> MemOps;

    // Create the frame slot
    for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
      FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
      SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
      unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
      SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
      SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0,
                                   false, false, 0);
      Chain = Store.getOperand(0);
      MemOps.push_back(Store);

      // Increment address by stack slot size for the next stored argument
      ArgOffset += StackSlotSize;
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
  }

  return Chain;
}

/// isLSAAddress - Return the immediate to use if the specified
/// value is representable as a LSA address.
static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      (Addr << 14 >> 14) != Addr)
    return 0;  // Top 14 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
}
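
// Illustrative example (not from the original source): Addr = 0x1000 is
// word-aligned and fits in the 18-bit signed immediate window, so this
// returns the constant 0x400 (the address in words); Addr = 0x1002 fails
// the low-bits test and returns 0.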

SDValue
SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  // CellSPU target does not yet support tail call optimization.
  isTailCall = false;

  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  unsigned NumOps = Outs.size();
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  // Handy pointer type
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.
  unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
  unsigned ArgRegIdx = 0;

  // Keep track of registers passing arguments
  std::vector<std::pair<unsigned, SDValue> > RegsToPass;
  // And the arguments passed on the stack
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    switch (Arg.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
    case MVT::i128:
    case MVT::f32:
    case MVT::f64:
    case MVT::v2i64:
    case MVT::v2f64:
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
                                           false, false, 0));
        ArgOffset += StackSlotSize;
      }
      break;
    }
  }

  // Accumulate how many bytes are to be pushed on the stack, including the
  // linkage area, and parameter passing area. According to the SPU ABI,
  // we minimally need space for [LR] and [SP].
  unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize();

  // Insert a call sequence start
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
                                                            true));

  if (!MemOpChains.empty()) {
    // Adjust the stack pointer for the stack arguments.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = SPUISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);

    if (!ST->usingLargeMem()) {
      // Turn calls to targets that are defined (i.e., have bodies) into BRSL
      // style calls, otherwise, external symbols are BRASL calls. This assumes
      // that declared/defined symbols are in the same compilation unit and can
      // be reached through PC-relative jumps.
      //
      // This may be an unsafe assumption for JIT and really large compilation
      // units.
      if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
      } else {
        Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
      }
    } else {
      // "Large memory" mode: Turn all calls into indirect calls with a X-form
      // address.
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
                                                 Callee.getValueType());

    if (!ST->usingLargeMem()) {
      Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
    } else {
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
    }
  } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
    // If this is an absolute destination address that appears to be a legal
    // local store address, use the munged value.
    Callee = SDValue(Dest, 0);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // If the function returns void, just return the chain.
  if (Ins.empty())
    return Chain;

  // If the call has results, copy the values out of the ret val registers.
  switch (Ins[0].VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected ret value!");
  case MVT::Other: break;
  case MVT::i32:
    if (Ins.size() > 1 && Ins[1].VT == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
                                 MVT::i32, InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      InVals.push_back(Chain.getValue(0));
    } else {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
    }
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i64:
  case MVT::i128:
  case MVT::f32:
  case MVT::f64:
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
                               InFlag).getValue(1);
    InVals.push_back(Chain.getValue(0));
    break;
  }

  return Chain;
}

SDValue
SPUTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_SPU);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);
    Flag = Chain.getValue(1);
  }

  if (Flag.getNode())
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
}

//===----------------------------------------------------------------------===//
// Vector related lowering:
//===----------------------------------------------------------------------===//

static ConstantSDNode *
getVecImm(SDNode *N) {
  SDValue OpVal(0, 0);

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return 0;
  }

  if (OpVal.getNode() != 0) {
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      return CN;
    }
  }

  return 0;
}

/// get_vec_u18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
/// constant.
SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (Value <= 0x3ffff)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}
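
// Illustrative example (not from the original source): a v4i32 splat of
// 0x0002ffff passes the Value <= 0x3ffff test above and yields an 18-bit
// immediate, while a splat of 0x00040000 does not fit and falls through
// to the SDValue() failure case.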

/// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant.
SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
      return DAG.getTargetConstant(Value, ValueType);
    }
  }

  return SDValue();
}

/// get_vec_i10imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 10-bit constant, and if so, return the
/// constant.
SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (isInt<10>(Value))
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_vec_i8imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 8-bit constant, and if so, return the
/// constant.
///
/// @note: The incoming vector is v16i8 because that's the only way we can load
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
/// same value.
SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
                           EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int Value = (int) CN->getZExtValue();
    if (ValueType == MVT::i16
        && Value <= 0xffff /* truncated from uint64_t */
        && ((short) Value >> 8) == ((short) Value & 0xff))
      return DAG.getTargetConstant(Value & 0xff, ValueType);
    else if (ValueType == MVT::i8
             && (Value & 0xff) == Value)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
/// whose low 16 bits are zero, and if so, return the upper 16 bits as the
/// ILHU-form constant.
SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
                             EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if ((ValueType == MVT::i32
         && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
        || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
      return DAG.getTargetConstant(Value >> 16, ValueType);
  }

  return SDValue();
}

/// get_v4i32_imm - Catch-all for general 32-bit constant vectors
SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
  }

  return SDValue();
}

/// get_v2i64_imm - Catch-all for general 64-bit constant vectors
SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant(CN->getZExtValue(), MVT::i64);
  }

  return SDValue();
}

//! Lower a BUILD_VECTOR instruction creatively:
static SDValue
LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();
  BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
  unsigned minSplatBits = EltVT.getSizeInBits();

  if (minSplatBits < 16)
    minSplatBits = 16;

  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, minSplatBits)
      || minSplatBits < SplatBitSize)
    return SDValue();   // Wasn't a constant vector or splat exceeded min

  uint64_t SplatBits = APSplatBits.getZExtValue();

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
                       Twine(VT.getEVTString()));
  case MVT::v4f32: {
    uint32_t Value32 = uint32_t(SplatBits);
    assert(SplatBitSize == 32
           && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(Value32, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
  }
  case MVT::v2f64: {
    uint64_t f64val = uint64_t(SplatBits);
    assert(SplatBitSize == 64
           && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(f64val, MVT::i64);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
  }
  case MVT::v16i8: {
    // 8-bit constants have to be expanded to 16-bits
    unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16,
                                   &Ops[0], Ops.size()));
  }
  case MVT::v8i16: {
    unsigned short Value16 = SplatBits;
    SDValue T = DAG.getConstant(Value16, EltVT);
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, T);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
  }
  case MVT::v4i32: {
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
  }
  case MVT::v2i32: {
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T);
  }
  case MVT::v2i64: {
    return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
  }
  }

  return SDValue();
}

//! Lower a v2i64 constant splat to SPU-friendly instructions.
SDValue
SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
                     DebugLoc dl) {
  uint32_t upper = uint32_t(SplatVal >> 32);
  uint32_t lower = uint32_t(SplatVal);

  if (upper == lower) {
    // Magic constant that can be matched by IL, ILA, et. al.
    SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   Val, Val, Val, Val));
  } else {
    bool upper_special, lower_special;

    // NOTE: This code creates common-case shuffle masks that can be easily
    // detected as common expressions. It is not attempting to create highly
    // specialized masks to replace any and all 0's, 0xff's and 0x80's.

    // Detect if the upper or lower half is a special shuffle mask pattern:
    upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
    lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
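
    // Illustrative example (not from the original source): for
    // SplatVal == 0xffffffff00000000ULL, upper is all ones and lower is zero,
    // so both halves are "special" and the splat is emitted as a plain
    // BUILD_VECTOR (a constant pool load) below.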

    // Both upper and lower are special, lower to a constant pool load:
    if (lower_special && upper_special) {
      SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
      return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
                         SplatValCN, SplatValCN);
    }

    SDValue LO32;
    SDValue HI32;
    SmallVector<SDValue, 16> ShufBytes;

    // Create lower vector if not a special pattern
    if (!lower_special) {
      SDValue LO32C = DAG.getConstant(lower, MVT::i32);
      LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     LO32C, LO32C, LO32C, LO32C));
    }

    // Create upper vector if not a special pattern
    if (!upper_special) {
      SDValue HI32C = DAG.getConstant(upper, MVT::i32);
      HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     HI32C, HI32C, HI32C, HI32C));
    }

    // If either upper or lower are special, then the two input operands are
    // the same (basically, one of them is a "don't care")
    if (lower_special)
      LO32 = HI32;
    if (upper_special)
      HI32 = LO32;

    for (int i = 0; i < 4; ++i) {
      uint64_t val = 0;
      for (int j = 0; j < 4; ++j) {
        bool process_upper, process_lower;
        val <<= 8;
        process_upper = (upper_special && (i & 1) == 0);
        process_lower = (lower_special && (i & 1) == 1);

        if (process_upper || process_lower) {
          if ((process_upper && upper == 0)
              || (process_lower && lower == 0))
            val |= 0x80;
          else if ((process_upper && upper == 0xffffffff)
                   || (process_lower && lower == 0xffffffff))
            val |= 0xc0;
          else if ((process_upper && upper == 0x80000000)
                   || (process_lower && lower == 0x80000000))
            val |= (j == 0 ? 0xe0 : 0x80);
        } else
          val |= i * 4 + j + ((i & 1) * 16);
      }

      ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
    }

    return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   &ShufBytes[0], ShufBytes.size()));
  }
}
1700 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1701 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1702 /// permutation vector, V3, is monotonically increasing with one "exception"
1703 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1704 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1705 /// In either case, the net result is going to eventually invoke SHUFB to
1706 /// permute/shuffle the bytes from V1 and V2.
/// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which
/// generate the control word for byte/halfword/word insertion. This takes
/// care of a single element move from V2 into V1.
/// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instruction.
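///
/// For example, the v4i32 mask (0, 1, 6, 3) is monotonic with the single
/// exception element 6 (element 2 of V2), so it can use the insertion-mask
/// path; the mask (1, 2, 3, 0) is matched as a rotation instead.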
1713 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1714 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1715 SDValue V1 = Op.getOperand(0);
1716 SDValue V2 = Op.getOperand(1);
1717 DebugLoc dl = Op.getDebugLoc();
1719 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1721 // If we have a single element being moved from V1 to V2, this can be handled
1722 // using the C*[DX] compute mask instructions, but the vector elements have
1723 // to be monotonically increasing with one exception element.
1724 EVT VecVT = V1.getValueType();
1725 EVT EltVT = VecVT.getVectorElementType();
1726 unsigned EltsFromV2 = 0;
1728 unsigned V2EltIdx0 = 0;
1729 unsigned CurrElt = 0;
1730 unsigned MaxElts = VecVT.getVectorNumElements();
1731 unsigned PrevElt = 0;
  int V0Elt = 0;
1733 bool monotonic = true;
  bool rotate = true;
1735 EVT maskVT; // which of the c?d instructions to use
  if (EltVT == MVT::i8) {
    V2EltIdx0 = 16;
    maskVT = MVT::v16i8;
  } else if (EltVT == MVT::i16) {
    V2EltIdx0 = 8;
    maskVT = MVT::v8i16;
  } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
    V2EltIdx0 = 4;
    maskVT = MVT::v4i32;
  } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
    V2EltIdx0 = 2;
    maskVT = MVT::v2i64;
  } else
    llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1752 for (unsigned i = 0; i != MaxElts; ++i) {
1753 if (SVN->getMaskElt(i) < 0)
      continue;
1756 unsigned SrcElt = SVN->getMaskElt(i);

    if (monotonic) {
1759 if (SrcElt >= V2EltIdx0) {
      if (++EltsFromV2 <= 1) {
1761 V2Elt = (V2EltIdx0 - SrcElt) << 2;
        }
1763 } else if (CurrElt != SrcElt) {
        monotonic = false;
      }

      ++CurrElt;
    }

    if (rotate) {
1771 if (PrevElt > 0 && SrcElt < MaxElts) {
1772 if ((PrevElt == SrcElt - 1)
1773 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
          PrevElt = SrcElt;
          if (SrcElt == 0)
            V0Elt = i;
        } else {
          rotate = false;
        }
1780 } else if (i == 0) {
1781 // First time through, need to keep track of previous element
        PrevElt = SrcElt;
      } else {
        // This isn't a rotation; it takes elements from vector 2
        rotate = false;
      }
    }
  }
1790 if (EltsFromV2 == 1 && monotonic) {
1791 // Compute mask and shuffle
1792 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1794 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
    // R1 ($sp) is used here only because it is guaranteed to have its
    // low-order bits zero
1796 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1797 DAG.getRegister(SPU::R1, PtrVT),
1798 DAG.getConstant(V2Elt, MVT::i32));
1799 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
                                     maskVT, Pointer);
1802 // Use shuffle mask in SHUFB synthetic instruction:
1803 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
                       ShufMaskOp));
1805 } else if (rotate) {
1806 int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;
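    // e.g., for the v4i32 mask (1, 2, 3, 0), V0Elt == 3, so rotamt is
    // (4 - 3) * 4 == 4 bytes: each element moves up one slot.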
1808 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1809 V1, DAG.getConstant(rotamt, MVT::i16));
1811 // Convert the SHUFFLE_VECTOR mask's input element units to the
1813 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1815 SmallVector<SDValue, 16> ResultMask;
1816 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1817 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1819 for (unsigned j = 0; j < BytesPerElement; ++j)
1820 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1823 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1824 &ResultMask[0], ResultMask.size());
1825 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
1829 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1830 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1831 DebugLoc dl = Op.getDebugLoc();
1833 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1834 // For a constant, build the appropriate constant vector, which will
1835 // eventually simplify to a vector register load.
1837 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1838 SmallVector<SDValue, 16> ConstVecValues;
    EVT VT;
    size_t n_copies;
1842 // Create a constant vector:
1843 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1844 default: llvm_unreachable("Unexpected constant value type in "
1845 "LowerSCALAR_TO_VECTOR");
1846 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1847 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1848 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1849 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1850 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1851 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1854 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1855 for (size_t j = 0; j < n_copies; ++j)
1856 ConstVecValues.push_back(CValue);
1858 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1859 &ConstVecValues[0], ConstVecValues.size());
1861 // Otherwise, copy the value from one register to another:
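  // On SPU, a scalar of any of these types already sits in the "preferred
  // slot" (the leftmost element) of its 128-bit register, so the copy is just
  // a reinterpretation of the register as a vector.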
1862 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1863 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f32:
  case MVT::f64:
1870 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
1877 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1878 EVT VT = Op.getValueType();
1879 SDValue N = Op.getOperand(0);
1880 SDValue Elt = Op.getOperand(1);
1881 DebugLoc dl = Op.getDebugLoc();
  SDValue retval;
1884 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
1885 // Constant argument:
1886 int EltNo = (int) C->getZExtValue();
1889 if (VT == MVT::i8 && EltNo >= 16)
1890 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
1891 else if (VT == MVT::i16 && EltNo >= 8)
1892 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
    else if (VT == MVT::i32 && EltNo >= 4)
      llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 3");
    else if (VT == MVT::i64 && EltNo >= 2)
      llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 1");
1898 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
1899 // i32 and i64: Element 0 is the preferred slot
1900 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
1903 // Need to generate shuffle mask and extract:
1904 int prefslot_begin = -1, prefslot_end = -1;
1905 int elt_byte = EltNo * VT.getSizeInBits() / 8;
    switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Invalid value type!");
    case MVT::i8:
      prefslot_begin = prefslot_end = 3;
      break;
    case MVT::i16:
      prefslot_begin = 2; prefslot_end = 3;
      break;
    case MVT::i32:
    case MVT::f32:
      prefslot_begin = 0; prefslot_end = 3;
      break;
    case MVT::i64:
    case MVT::f64:
      prefslot_begin = 0; prefslot_end = 7;
      break;
    }
1930 assert(prefslot_begin != -1 && prefslot_end != -1 &&
1931 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
1933 unsigned int ShufBytes[16] = {
1934 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1936 for (int i = 0; i < 16; ++i) {
      // zero fill the upper part of the preferred slot; don't care about the
      // rest
1939 unsigned int mask_val;
1940 if (i <= prefslot_end) {
        mask_val =
          ((i < prefslot_begin)
           ? 0x80
           : elt_byte + (i - prefslot_begin));
1946 ShufBytes[i] = mask_val;
      } else
1948 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
1951 SDValue ShufMask[4];
1952 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
1953 unsigned bidx = i * 4;
1954 unsigned int bits = ((ShufBytes[bidx] << 24) |
1955 (ShufBytes[bidx+1] << 16) |
1956 (ShufBytes[bidx+2] << 8) |
1958 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
1961 SDValue ShufMaskVec =
1962 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1963 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
1965 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1966 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
1967 N, N, ShufMaskVec));
  } else {
1969 // Variable index: Rotate the requested element into slot 0, then replicate
1970 // slot 0 across the vector
1971 EVT VecVT = N.getValueType();
1972 if (!VecVT.isSimple() || !VecVT.isVector() || !VecVT.is128BitVector()) {
1973 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
1977 // Make life easier by making sure the index is zero-extended to i32
1978 if (Elt.getValueType() != MVT::i32)
1979 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
1981 // Scale the index to a bit/byte shift quantity
  APInt scaleFactor =
1983 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
1984 unsigned scaleShift = scaleFactor.logBase2();
1987 if (scaleShift > 0) {
1988 // Scale the shift factor:
1989 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
1990 DAG.getConstant(scaleShift, MVT::i32));
1993 vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
1995 // Replicate the bytes starting at byte 0 across the entire vector (for
1996 // consistency with the notion of a unified register set)
  SDValue replicate;
1999 switch (VT.getSimpleVT().SimpleTy) {
  default:
    report_fatal_error("LowerEXTRACT_VECTOR_ELT(variable): Unhandled vector"
                       "type");
  case MVT::i8: {
2005 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2006 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2007 factor, factor, factor, factor);
    break;
  }
  case MVT::i16: {
2011 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2012 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2013 factor, factor, factor, factor);
    break;
  }
  case MVT::i32:
  case MVT::f32: {
2018 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2019 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2020 factor, factor, factor, factor);
    break;
  }
  case MVT::i64:
  case MVT::f64: {
2025 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2026 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2027 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2028 loFactor, hiFactor, loFactor, hiFactor);
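    // Each factor word lists the big-endian byte indices to replicate from
    // the rotated vector: i8 copies byte 0 everywhere, i16 the pair {0,1},
    // i32 bytes {0..3}, and i64 bytes {0..7} via the lo/hi factor pair.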
2033 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2034 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2035 vecShift, vecShift, replicate));
  }

  return retval;
}
2041 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2042 SDValue VecOp = Op.getOperand(0);
2043 SDValue ValOp = Op.getOperand(1);
2044 SDValue IdxOp = Op.getOperand(2);
2045 DebugLoc dl = Op.getDebugLoc();
2046 EVT VT = Op.getValueType();
2048 // use 0 when the lane to insert to is 'undef'
  int Idx = 0;
2050 if (IdxOp.getOpcode() != ISD::UNDEF) {
2051 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2052 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2053 Idx = (CN->getSExtValue());
2056 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2057 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2058 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2059 DAG.getRegister(SPU::R1, PtrVT),
2060 DAG.getConstant(Idx, PtrVT));
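  // SHUFFLE_MASK is selected to one of the CBD/CHD/CWD/CDD "generate controls
  // for insertion" instructions; with $sp + Idx as the address operand it
  // produces exactly the insertion mask for element Idx.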
2061 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, VT, Pointer);

  SDValue result =
2064 DAG.getNode(SPUISD::SHUFB, dl, VT,
2065 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
                VecOp,
2067 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));

  return result;
}
2072 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2073 const TargetLowering &TLI)
2075 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2076 DebugLoc dl = Op.getDebugLoc();
2077 EVT ShiftVT = TLI.getShiftAmountTy();
  assert(Op.getValueType() == MVT::i8);

  switch (Opc) {
  default:
    llvm_unreachable("Unhandled i8 math operator");
  case ISD::ADD: {
    // 8-bit addition: Promote the arguments up to 16 bits and truncate down
    // afterwards.
2088 SDValue N1 = Op.getOperand(1);
2089 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2090 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2091 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }

  case ISD::SUB: {
    // 8-bit subtraction: Promote the arguments up to 16 bits and truncate
    // down afterwards.
2099 SDValue N1 = Op.getOperand(1);
2100 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2101 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2102 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  case ISD::ROTR:
  case ISD::ROTL: {
    SDValue N1 = Op.getOperand(1);
2108 EVT N1VT = N1.getValueType();
2110 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2111 if (!N1VT.bitsEq(ShiftVT)) {
      unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
                       ? ISD::ZERO_EXTEND
                       : ISD::TRUNCATE;
      N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2118 // Replicate lower 8-bits into upper 8:
2120 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2121 DAG.getNode(ISD::SHL, dl, MVT::i16,
2122 N0, DAG.getConstant(8, MVT::i32)));
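    // With the low byte mirrored into the high byte, a 16-bit rotate of the
    // expanded value produces the same low byte as an 8-bit rotate of the
    // original operand.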
2124 // Truncate back down to i8
2125 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
  }
  case ISD::SRL:
  case ISD::SHL: {
    SDValue N1 = Op.getOperand(1);
2131 EVT N1VT = N1.getValueType();
2133 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2134 if (!N1VT.bitsEq(ShiftVT)) {
2135 unsigned N1Opc = ISD::ZERO_EXTEND;
2137 if (N1.getValueType().bitsGT(ShiftVT))
2138 N1Opc = ISD::TRUNCATE;
2140 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2143 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  case ISD::SRA: {
    SDValue N1 = Op.getOperand(1);
2148 EVT N1VT = N1.getValueType();
2150 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2151 if (!N1VT.bitsEq(ShiftVT)) {
2152 unsigned N1Opc = ISD::SIGN_EXTEND;
2154 if (N1VT.bitsGT(ShiftVT))
2155 N1Opc = ISD::TRUNCATE;
2156 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2159 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
  }
  case ISD::MUL: {
    SDValue N1 = Op.getOperand(1);
2165 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2166 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2167 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2168 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2176 //! Lower byte immediate operations for v16i8 vectors:
2178 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
  SDValue ConstVec;
  SDValue Arg;
2181 EVT VT = Op.getValueType();
2182 DebugLoc dl = Op.getDebugLoc();
2184 ConstVec = Op.getOperand(0);
2185 Arg = Op.getOperand(1);
2186 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2187 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2188 ConstVec = ConstVec.getOperand(0);
    } else {
2190 ConstVec = Op.getOperand(1);
2191 Arg = Op.getOperand(0);
2192 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2193 ConstVec = ConstVec.getOperand(0);
2198 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2199 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2200 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2202 APInt APSplatBits, APSplatUndef;
2203 unsigned SplatBitSize;
    bool HasAnyUndefs;
2205 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2207 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2208 HasAnyUndefs, minSplatBits)
2209 && minSplatBits <= SplatBitSize) {
2210 uint64_t SplatBits = APSplatBits.getZExtValue();
2211 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2213 SmallVector<SDValue, 16> tcVec;
2214 tcVec.assign(16, tc);
2215 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2216 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
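  // The byte-immediate forms (ANDBI/ORBI/XORBI) apply the same 8-bit
  // immediate to every byte of the quadword, so only the low byte of the
  // splat constant matters; it is rebuilt here as an explicit 16-way splat.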
2220 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2221 // lowered. Return the operation, rather than a null SDValue.
2225 //! Custom lowering for CTPOP (count population)
  Custom lowering code that counts the number of ones in the input
  operand. SPU has such an instruction, but it counts the number of
  ones per byte, so the per-byte counts have to be accumulated.
2231 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2232 EVT VT = Op.getValueType();
2233 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2234 VT, (128 / VT.getSizeInBits()));
2235 DebugLoc dl = Op.getDebugLoc();
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Invalid value type!");
  case MVT::i8: {
    SDValue N = Op.getOperand(0);
2242 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2244 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2245 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
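    // CNTB counts the ones in each byte of the quadword independently, so for
    // an i8 operand the count in the preferred-slot byte is already the final
    // answer.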
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
  }

  case MVT::i16: {
    MachineFunction &MF = DAG.getMachineFunction();
2252 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2254 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2256 SDValue N = Op.getOperand(0);
2257 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
    // The count can reach 16, so keep 5 bits (0x0f would clip 16 to 0):
    SDValue Mask0 = DAG.getConstant(0x1f, MVT::i16);
2259 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2261 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2262 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2264 // CNTB_result becomes the chain to which all of the virtual registers
2265 // CNTB_reg, SUM1_reg become associated:
2266 SDValue CNTB_result =
2267 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2269 SDValue CNTB_rescopy =
2270 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2272 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
    return DAG.getNode(ISD::AND, dl, MVT::i16,
                       DAG.getNode(ISD::ADD, dl, MVT::i16,
                                   DAG.getNode(ISD::SRL, dl, MVT::i16,
                                               Tmp1, Shift1),
                                   Tmp1),
                       Mask0);
  }

  case MVT::i32: {
    MachineFunction &MF = DAG.getMachineFunction();
2284 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2286 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2287 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2289 SDValue N = Op.getOperand(0);
2290 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2291 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2292 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2293 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2295 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2296 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2298 // CNTB_result becomes the chain to which all of the virtual registers
2299 // CNTB_reg, SUM1_reg become associated:
2300 SDValue CNTB_result =
2301 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
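    // Two shift-and-add rounds (by 16, then by 8) funnel the four per-byte
    // counts into the low byte; the final AND with Mask0 extracts the total.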
2303 SDValue CNTB_rescopy =
2304 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
    SDValue Comp1 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
                  Shift1);

    SDValue Sum1 =
      DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2313 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2315 SDValue Sum1_rescopy =
2316 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
    SDValue Comp2 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
                  Shift2);

    SDValue Sum2 =
      DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2324 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2326 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2336 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2338 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2339 All conversions to i64 are expanded to a libcall.
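  For reference, the RTLIB entries resolve to the usual soft-float routines,
  e.g. __fixdfsi / __fixunsdfsi for f64->i32 and __fixdfdi / __fixunsdfdi for
  f64->i64.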
2341 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2342 const SPUTargetLowering &TLI) {
2343 EVT OpVT = Op.getValueType();
2344 SDValue Op0 = Op.getOperand(0);
2345 EVT Op0VT = Op0.getValueType();
2347 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2348 || OpVT == MVT::i64) {
2349 // Convert f32 / f64 to i32 / i64 via libcall.
2351 (Op.getOpcode() == ISD::FP_TO_SINT)
2352 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2353 : RTLIB::getFPTOUINT(Op0VT, OpVT);
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-int conversion!");

    SDValue Dummy;
2356 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2362 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2364 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2365 All conversions from i64 are expanded to a libcall.
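  For reference: i32->f64 resolves to __floatsidf / __floatunsidf, and i64
  sources to __floatdidf / __floatundidf (or their f32 equivalents).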
2367 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2368 const SPUTargetLowering &TLI) {
2369 EVT OpVT = Op.getValueType();
2370 SDValue Op0 = Op.getOperand(0);
2371 EVT Op0VT = Op0.getValueType();
2373 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2374 || Op0VT == MVT::i64) {
2375 // Convert i32, i64 to f64 via libcall:
2377 (Op.getOpcode() == ISD::SINT_TO_FP)
2378 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2379 : RTLIB::getUINTTOFP(Op0VT, OpVT);
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected int-to-fp conversion!");

    SDValue Dummy;
2382 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2388 //! Lower ISD::SETCC
2390 This handles MVT::f64 (double floating point) condition lowering
2392 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2393 const TargetLowering &TLI) {
2394 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2395 DebugLoc dl = Op.getDebugLoc();
2396 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2398 SDValue lhs = Op.getOperand(0);
2399 SDValue rhs = Op.getOperand(1);
2400 EVT lhsVT = lhs.getValueType();
  assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::f64\n");
2403 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2404 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2405 EVT IntVT(MVT::i64);
2407 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2408 // selected to a NOP:
2409 SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
  SDValue lhsHi32 =
2411 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2412 DAG.getNode(ISD::SRL, dl, IntVT,
2413 i64lhs, DAG.getConstant(32, MVT::i32)));
2414 SDValue lhsHi32abs =
2415 DAG.getNode(ISD::AND, dl, MVT::i32,
2416 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
  SDValue lhsLo32 =
2418 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2420 // SETO and SETUO only use the lhs operand:
2421 if (CC->get() == ISD::SETO) {
    // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
    // SETUO:
2424 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2425 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2426 DAG.getSetCC(dl, ccResultVT,
2427 lhs, DAG.getConstantFP(0.0, lhsVT),
                                    ISD::SETUO),
2429 DAG.getConstant(ccResultAllOnes, ccResultVT));
2430 } else if (CC->get() == ISD::SETUO) {
2431 // Evaluates to true if Op0 is [SQ]NaN
2432 return DAG.getNode(ISD::AND, dl, ccResultVT,
                       DAG.getSetCC(dl, ccResultVT,
                                    lhsHi32abs,
                                    DAG.getConstant(0x7ff00000, MVT::i32),
                                    ISD::SETGE),
                       DAG.getSetCC(dl, ccResultVT,
                                    lhsLo32,
                                    DAG.getConstant(0, MVT::i32),
                                    ISD::SETGE));
  }
2443 SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
  SDValue rhsHi32 =
2445 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2446 DAG.getNode(ISD::SRL, dl, IntVT,
2447 i64rhs, DAG.getConstant(32, MVT::i32)));
2449 // If a value is negative, subtract from the sign magnitude constant:
2450 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2452 // Convert the sign-magnitude representation into 2's complement:
2453 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2454 lhsHi32, DAG.getConstant(31, MVT::i32));
2455 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
  SDValue lhsSelect =
2457 DAG.getNode(ISD::SELECT, dl, IntVT,
2458 lhsSelectMask, lhsSignMag2TC, i64lhs);
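  // lhsSelectMask is all ones exactly when lhs is negative; in that case the
  // subtraction from 0x8000000000000000 converts the sign-magnitude bits into
  // a two's complement value whose signed integer ordering matches the
  // floating-point ordering (e.g., -0.0 maps to 0, the same as +0.0).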
2460 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2461 rhsHi32, DAG.getConstant(31, MVT::i32));
2462 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
  SDValue rhsSelect =
2464 DAG.getNode(ISD::SELECT, dl, IntVT,
2465 rhsSelectMask, rhsSignMag2TC, i64rhs);

  unsigned compareOp = 0;
  switch (CC->get()) {
  case ISD::SETOEQ:
  case ISD::SETUEQ:
    compareOp = ISD::SETEQ; break;
  case ISD::SETOGT:
  case ISD::SETUGT:
    compareOp = ISD::SETGT; break;
  case ISD::SETOGE:
  case ISD::SETUGE:
    compareOp = ISD::SETGE; break;
  case ISD::SETOLT:
  case ISD::SETULT:
    compareOp = ISD::SETLT; break;
  case ISD::SETOLE:
  case ISD::SETULE:
    compareOp = ISD::SETLE; break;
  case ISD::SETONE:
  case ISD::SETUNE:
    compareOp = ISD::SETNE; break;
  default:
    report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
  }

  SDValue result =
2493 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2494 (ISD::CondCode) compareOp);
2496 if ((CC->get() & 0x8) == 0) {
2497 // Ordered comparison:
    SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
                                  lhs, DAG.getConstantFP(0.0, MVT::f64),
                                  ISD::SETO);
    SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
                                  rhs, DAG.getConstantFP(0.0, MVT::f64),
                                  ISD::SETO);
    SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
    result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
  }

  return result;
}
2512 //! Lower ISD::SELECT_CC
  ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
  SELB instruction.
  \note Need to revisit this in the future: if the code path through the true
  and false value computations is longer than the latency of a branch (6
  cycles), then it would be more advantageous to insert a new basic block and
  branch on the condition. However, this code does not make that assumption,
  given the simplistic uses so far.
2524 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2525 const TargetLowering &TLI) {
2526 EVT VT = Op.getValueType();
2527 SDValue lhs = Op.getOperand(0);
2528 SDValue rhs = Op.getOperand(1);
2529 SDValue trueval = Op.getOperand(2);
2530 SDValue falseval = Op.getOperand(3);
2531 SDValue condition = Op.getOperand(4);
2532 DebugLoc dl = Op.getDebugLoc();
2534 // NOTE: SELB's arguments: $rA, $rB, $mask
2536 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2537 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2538 // condition was true and 0s where the condition was false. Hence, the
2539 // arguments to SELB get reversed.
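  //
  // i.e., (select_cc lhs, rhs, trueval, falseval, cc) becomes
  //       (SELB falseval, trueval, (setcc lhs, rhs, cc)).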
2541 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2542 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2543 // with another "cannot select select_cc" assert:
2545 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2546 TLI.getSetCCResultType(Op.getValueType()),
2547 lhs, rhs, condition);
2548 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2551 //! Custom lower ISD::TRUNCATE
2552 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2554 // Type to truncate to
2555 EVT VT = Op.getValueType();
2556 MVT simpleVT = VT.getSimpleVT();
2557 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2558 VT, (128 / VT.getSizeInBits()));
2559 DebugLoc dl = Op.getDebugLoc();
2561 // Type to truncate from
2562 SDValue Op0 = Op.getOperand(0);
2563 EVT Op0VT = Op0.getValueType();
2565 if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
2566 // Create shuffle mask, least significant doubleword of quadword
2567 unsigned maskHigh = 0x08090a0b;
2568 unsigned maskLow = 0x0c0d0e0f;
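    // In the quadword's big-endian layout, bytes 8-15 hold the least
    // significant doubleword of the i128, so copying them into bytes 0-7
    // leaves the truncated value in the i64 preferred slot.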
2569 // Use a shuffle to perform the truncation
2570 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2571 DAG.getConstant(maskHigh, MVT::i32),
2572 DAG.getConstant(maskLow, MVT::i32),
2573 DAG.getConstant(maskHigh, MVT::i32),
2574 DAG.getConstant(maskLow, MVT::i32));
2576 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2577 Op0, Op0, shufMask);
2579 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2582 return SDValue(); // Leave the truncate unmolested
2586 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2587 * algorithm is to duplicate the sign bit using rotmai to generate at
2588 * least one byte full of sign bits. Then propagate the "sign-byte" into
2589 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2591 * @param Op The sext operand
2592 * @param DAG The current DAG
2593 * @return The SDValue with the entire instruction sequence
2595 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2597 DebugLoc dl = Op.getDebugLoc();
2599 // Type to extend to
2600 MVT OpVT = Op.getValueType().getSimpleVT();
2602 // Type to extend from
2603 SDValue Op0 = Op.getOperand(0);
2604 MVT Op0VT = Op0.getValueType().getSimpleVT();
2606 // The type to extend to needs to be a i128 and
2607 // the type to extend from needs to be i64 or i32.
2608 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2609 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2611 // Create shuffle mask
2612 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2613 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2614 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
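  // Control bytes 0x00-0x0f select from the first SHUFB operand (the
  // any-extended input) and 0x10-0x1f from the second (the sign-bit vector),
  // so 0x10101010 replicates the sign byte across a whole word.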
2615 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2616 DAG.getConstant(mask1, MVT::i32),
2617 DAG.getConstant(mask1, MVT::i32),
2618 DAG.getConstant(mask2, MVT::i32),
2619 DAG.getConstant(mask3, MVT::i32));
2621 // Word wise arithmetic right shift to generate at least one byte
2622 // that contains sign bits.
2623 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2624 SDValue sraVal = DAG.getNode(ISD::SRA,
                               dl,
                               mvt,
2627 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2628 DAG.getConstant(31, MVT::i32));
2630 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2631 // and the input value into the lower 64 bits.
2632 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2633 DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i128, Op0), sraVal, shufMask);
2635 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
2638 //! Custom (target-specific) lowering entry point
  This is where LLVM's DAG selection process calls to do target-specific
  lowering of nodes.
2644 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2646 unsigned Opc = (unsigned) Op.getOpcode();
2647 EVT VT = Op.getValueType();

  switch (Opc) {
  default: {
2652 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2653 errs() << "Op.getOpcode() = " << Opc << "\n";
2654 errs() << "*Op.getNode():\n";
2655 Op.getNode()->dump();
2657 llvm_unreachable(0);
  }
  case ISD::LOAD:
2663 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
  case ISD::STORE:
2665 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2666 case ISD::ConstantPool:
2667 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2668 case ISD::GlobalAddress:
2669 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2670 case ISD::JumpTable:
2671 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2672 case ISD::ConstantFP:
2673 return LowerConstantFP(Op, DAG);
2675 // i8, i64 math ops:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::ROTR:
  case ISD::ROTL:
  case ISD::SRL:
  case ISD::SHL:
  case ISD::SRA:
    if (VT == MVT::i8)
2684 return LowerI8Math(Op, DAG, Opc, *this);
2688 case ISD::FP_TO_SINT:
2689 case ISD::FP_TO_UINT:
2690 return LowerFP_TO_INT(Op, DAG, *this);
2692 case ISD::SINT_TO_FP:
2693 case ISD::UINT_TO_FP:
2694 return LowerINT_TO_FP(Op, DAG, *this);
2696 // Vector-related lowering.
2697 case ISD::BUILD_VECTOR:
2698 return LowerBUILD_VECTOR(Op, DAG);
2699 case ISD::SCALAR_TO_VECTOR:
2700 return LowerSCALAR_TO_VECTOR(Op, DAG);
2701 case ISD::VECTOR_SHUFFLE:
2702 return LowerVECTOR_SHUFFLE(Op, DAG);
2703 case ISD::EXTRACT_VECTOR_ELT:
2704 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2705 case ISD::INSERT_VECTOR_ELT:
2706 return LowerINSERT_VECTOR_ELT(Op, DAG);
2708 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
2712 return LowerByteImmed(Op, DAG);
2714 // Vector and i8 multiply:
  case ISD::MUL:
    if (VT == MVT::i8)
2717 return LowerI8Math(Op, DAG, Opc, *this);
    break;
  case ISD::CTPOP:
2720 return LowerCTPOP(Op, DAG);
2722 case ISD::SELECT_CC:
2723 return LowerSELECT_CC(Op, DAG, *this);

  case ISD::SETCC:
2726 return LowerSETCC(Op, DAG, *this);

  case ISD::TRUNCATE:
2729 return LowerTRUNCATE(Op, DAG);
2731 case ISD::SIGN_EXTEND:
2732 return LowerSIGN_EXTEND(Op, DAG);
  }

  return SDValue();
}
2738 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2739 SmallVectorImpl<SDValue>&Results,
2740 SelectionDAG &DAG) const
2743 unsigned Opc = (unsigned) N->getOpcode();
2744 EVT OpVT = N->getValueType(0);

  switch (Opc) {
  default: {
2748 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2749 errs() << "Op.getOpcode() = " << Opc << "\n";
2750 errs() << "*Op.getNode():\n";
    N->dump();
2758 /* Otherwise, return unchanged */
2761 //===----------------------------------------------------------------------===//
2762 // Target Optimization Hooks
2763 //===----------------------------------------------------------------------===//
2766 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2769 TargetMachine &TM = getTargetMachine();
2771 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2772 SelectionDAG &DAG = DCI.DAG;
2773 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2774 EVT NodeVT = N->getValueType(0); // The node's value type
2775 EVT Op0VT = Op0.getValueType(); // The first operand's result
2776 SDValue Result; // Initially, empty result
2777 DebugLoc dl = N->getDebugLoc();
2779 switch (N->getOpcode()) {
  default: break;
  case ISD::ADD: {
2782 SDValue Op1 = N->getOperand(1);
2784 if (Op0.getOpcode() == SPUISD::IndirectAddr
2785 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2786 // Normalize the operands to reduce repeated code
2787 SDValue IndirectArg = Op0, AddArg = Op1;
2789 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
        IndirectArg = Op1;
        AddArg = Op0;
      }
2794 if (isa<ConstantSDNode>(AddArg)) {
2795 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2796 SDValue IndOp1 = IndirectArg.getOperand(1);
2798 if (CN0->isNullValue()) {
2799 // (add (SPUindirect <arg>, <arg>), 0) ->
2800 // (SPUindirect <arg>, <arg>)
2802 #if !defined(NDEBUG)
2803 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
          errs() << "\n"
2805 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2806 << "With: (SPUindirect <arg>, <arg>)\n";
2811 } else if (isa<ConstantSDNode>(IndOp1)) {
2812 // (add (SPUindirect <arg>, <const>), <const>) ->
2813 // (SPUindirect <arg>, <const + const>)
2814 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2815 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2816 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2818 #if !defined(NDEBUG)
2819 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
          errs() << "\n"
2821 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2822 << "), " << CN0->getSExtValue() << ")\n"
2823 << "With: (SPUindirect <arg>, "
2824 << combinedConst << ")\n";
        }
#endif
2828 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2829 IndirectArg, combinedValue);
2835 case ISD::SIGN_EXTEND:
2836 case ISD::ZERO_EXTEND:
2837 case ISD::ANY_EXTEND: {
2838 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2839 // (any_extend (SPUextract_elt0 <arg>)) ->
2840 // (SPUextract_elt0 <arg>)
2841 // Types must match, however...
2842 #if !defined(NDEBUG)
2843 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2844 errs() << "\nReplace: ";
2846 errs() << "\nWith: ";
2847 Op0.getNode()->dump(&DAG);
2856 case SPUISD::IndirectAddr: {
2857 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2858 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2859 if (CN != 0 && CN->isNullValue()) {
2860 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2861 // (SPUaform <addr>, 0)
2863 DEBUG(errs() << "Replace: ");
2864 DEBUG(N->dump(&DAG));
2865 DEBUG(errs() << "\nWith: ");
2866 DEBUG(Op0.getNode()->dump(&DAG));
2867 DEBUG(errs() << "\n");

        return Op0;
      }
2871 } else if (Op0.getOpcode() == ISD::ADD) {
2872 SDValue Op1 = N->getOperand(1);
2873 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
2874 // (SPUindirect (add <arg>, <arg>), 0) ->
2875 // (SPUindirect <arg>, <arg>)
2876 if (CN1->isNullValue()) {
2878 #if !defined(NDEBUG)
2879 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
          errs() << "\n"
2881 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
2882 << "With: (SPUindirect <arg>, <arg>)\n";
2886 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2887 Op0.getOperand(0), Op0.getOperand(1));
2893 case SPUISD::SHLQUAD_L_BITS:
2894 case SPUISD::SHLQUAD_L_BYTES:
2895 case SPUISD::ROTBYTES_LEFT: {
2896 SDValue Op1 = N->getOperand(1);
2898 // Kill degenerate vector shifts:
2899 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2900 if (CN->isNullValue()) {
        Result = Op0;
      }
    }
    break;
  }
2906 case SPUISD::PREFSLOT2VEC: {
2907 switch (Op0.getOpcode()) {
2910 case ISD::ANY_EXTEND:
2911 case ISD::ZERO_EXTEND:
2912 case ISD::SIGN_EXTEND: {
2913 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
2915 // but only if the SPUprefslot2vec and <arg> types match.
2916 SDValue Op00 = Op0.getOperand(0);
2917 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
2918 SDValue Op000 = Op00.getOperand(0);
2919 if (Op000.getValueType() == NodeVT) {
          Result = Op000;
        }
      }
      break;
    }
2925 case SPUISD::VEC2PREFSLOT: {
2926 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
2928 Result = Op0.getOperand(0);
2936 // Otherwise, return unchanged.
2938 if (Result.getNode()) {
2939 DEBUG(errs() << "\nReplace.SPU: ");
2940 DEBUG(N->dump(&DAG));
2941 DEBUG(errs() << "\nWith: ");
2942 DEBUG(Result.getNode()->dump(&DAG));
2943 DEBUG(errs() << "\n");
  }

  return Result;
}
2950 //===----------------------------------------------------------------------===//
2951 // Inline Assembly Support
2952 //===----------------------------------------------------------------------===//
2954 /// getConstraintType - Given a constraint letter, return the type of
2955 /// constraint it is for this target.
2956 SPUTargetLowering::ConstraintType
2957 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
2958 if (ConstraintLetter.size() == 1) {
2959 switch (ConstraintLetter[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
2966 return C_RegisterClass;
2969 return TargetLowering::getConstraintType(ConstraintLetter);
2972 std::pair<unsigned, const TargetRegisterClass*>
2973 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2976 if (Constraint.size() == 1) {
2977 // GCC RS6000 Constraint Letters
2978 switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64)
2982 return std::make_pair(0U, SPU::R64CRegisterClass);
2983 return std::make_pair(0U, SPU::R32CRegisterClass);
    case 'f':
      if (VT == MVT::f32)
2986 return std::make_pair(0U, SPU::R32FPRegisterClass);
2987 else if (VT == MVT::f64)
2988 return std::make_pair(0U, SPU::R64FPRegisterClass);
      break;
    case 'v':
2991 return std::make_pair(0U, SPU::GPRCRegisterClass);
2995 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2998 //! Compute used/known bits for a SPU operand
3000 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                  const APInt &Mask,
                                                  APInt &KnownZero,
                                                  APInt &KnownOne,
3004 const SelectionDAG &DAG,
3005 unsigned Depth ) const {
3007 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3009 switch (Op.getOpcode()) {
3011 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3017 case SPUISD::PREFSLOT2VEC:
3018 case SPUISD::LDRESULT:
3019 case SPUISD::VEC2PREFSLOT:
3020 case SPUISD::SHLQUAD_L_BITS:
3021 case SPUISD::SHLQUAD_L_BYTES:
3022 case SPUISD::VEC_ROTL:
3023 case SPUISD::VEC_ROTR:
3024 case SPUISD::ROTBYTES_LEFT:
3025 case SPUISD::SELECT_MASK:
3032 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3033 unsigned Depth) const {
3034 switch (Op.getOpcode()) {
  default:
    return 1;

  case ISD::SETCC: {
    EVT VT = Op.getValueType();

    if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
      VT = MVT::i32;
    }
    return VT.getSizeInBits();
  }
  }
}
3049 // LowerAsmOperandForConstraint
3051 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3052 char ConstraintLetter,
3053 std::vector<SDValue> &Ops,
3054 SelectionDAG &DAG) const {
3055 // Default, for the time being, to the base class handler
3056 TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
3059 /// isLegalAddressImmediate - Return true if the integer value can be used
3060 /// as the offset of the target addressing mode.
3061 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3062 const Type *Ty) const {
3063 // SPU's addresses are 256K:
3064 return (V > -(1 << 18) && V < (1 << 18) - 1);
3067 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3072 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3073 // The SPU target isn't yet aware of offsets.