2 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the SPUTargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "SPURegisterNames.h"
15 #include "SPUISelLowering.h"
16 #include "SPUTargetMachine.h"
17 #include "SPUFrameInfo.h"
18 #include "SPUMachineFunction.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Function.h"
21 #include "llvm/Intrinsics.h"
22 #include "llvm/CallingConv.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/SelectionDAG.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/ADT/VectorExtras.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
40 // Used in getTargetNodeName() below
// Lazily-populated map from SPUISD opcode to a printable name.
// NOTE(review): mutable file-scope state; getTargetNodeName() fills it on
// first call — assumes single-threaded use, confirm against LLVM's threading
// expectations for this era.
42 std::map<unsigned, const char *> node_names;
44 //! EVT mapping to useful data for Cell SPU
// Field list is not visible in this chunk; uses elsewhere in the file show it
// carries at least 'valtype' (the EVT key) and 'prefslot_byte' (byte offset
// of the value's preferred slot within a 16-byte quadword).
45 struct valtype_map_s {
// Per-EVT lookup table; the initializer entries are not visible in this chunk.
50 const valtype_map_s valtype_map[] = {
// Element count of valtype_map (classic sizeof-array idiom).
61 const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
// Linear-search valtype_map for the entry matching VT.
// Returns a pointer to the matching entry; if no entry matches, the visible
// tail issues report_fatal_error (so callers may assume a non-null result).
// Lines between the match and the error path are not visible in this chunk.
63 const valtype_map_s *getValueTypeMapEntry(EVT VT) {
64 const valtype_map_s *retval = 0;
66 for (size_t i = 0; i < n_valtype_map; ++i) {
67 if (valtype_map[i].valtype == VT) {
68 retval = valtype_map + i;
// Unsupported EVT: hard abort with the type name rather than returning null.
75 report_fatal_error("getValueTypeMapEntry returns NULL for " +
76 Twine(VT.getEVTString()));
83 //! Expand a library call into an actual call DAG node
86 This code is taken from SelectionDAGLegalize, since it is not exposed as
87 part of the LLVM SelectionDAG API.
// Builds a call to the runtime-library function for LC, passing Op's operands
// as arguments, and returns the call's result value (CallInfo.first).
// The return-type line and some intermediate lines (e.g. the declaration of
// RetTy, Entry.Ty assignment) are not visible in this chunk.
// NOTE(review): the 'Hi' out-parameter is never assigned in the visible code —
// presumably unused by callers here; confirm.
91 ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
92 bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
93 // The input chain to this libcall is the entry node of the function.
94 // Legalizing the call will automatically add the previous call to the
96 SDValue InChain = DAG.getEntryNode();
98 TargetLowering::ArgListTy Args;
99 TargetLowering::ArgListEntry Entry;
// Each operand of Op becomes one call argument, sign- or zero-extended
// according to isSigned.
100 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
101 EVT ArgVT = Op.getOperand(i).getValueType();
102 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
103 Entry.Node = Op.getOperand(i);
105 Entry.isSExt = isSigned;
106 Entry.isZExt = !isSigned;
107 Args.push_back(Entry);
// Callee is the external symbol named by the RTLIB table for this libcall.
109 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
112 // Splice the libcall in wherever FindInputOutputChains tells us to.
114 Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
115 std::pair<SDValue, SDValue> CallInfo =
116 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
117 0, TLI.getLibcallCallingConv(LC), false,
118 /*isReturnValueUsed=*/true,
119 Callee, Args, DAG, Op.getDebugLoc());
// CallInfo.first is the call's return value; .second (the chain) is dropped.
121 return CallInfo.first;
// Constructor: declares the Cell SPU's register classes and configures how
// every ISD operation / value-type pair is legalized (Legal, Promote, Expand,
// or Custom), then computes derived register properties and picks the
// pre-RA scheduler. The initializer-list tail and several loop
// headers/closers are not visible in this chunk.
125 SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
126 : TargetLowering(TM, new TargetLoweringObjectFileELF()),
128 // Fold away setcc operations if possible.
131 // Use _setjmp/_longjmp instead of setjmp/longjmp.
132 setUseUnderscoreSetJmp(true);
133 setUseUnderscoreLongJmp(true);
135 // Set RTLIB libcall names as used by SPU:
136 setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
138 // Set up the SPU's register classes:
139 addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
140 addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
141 addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
142 addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
143 addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
144 addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
145 addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
147 // SPU has no sign or zero extended loads for i1, i8, i16:
148 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
149 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
150 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
152 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
153 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
// No truncating stores from i128 or f64; they are split/expanded instead.
155 setTruncStoreAction(MVT::i128, MVT::i64, Expand);
156 setTruncStoreAction(MVT::i128, MVT::i32, Expand);
157 setTruncStoreAction(MVT::i128, MVT::i16, Expand);
158 setTruncStoreAction(MVT::i128, MVT::i8, Expand);
160 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
162 // SPU constant load actions are custom lowered:
163 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
164 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
166 // SPU's loads and stores have to be custom lowered:
// (Loop bounds cover the integer types i8..i128; the increment line is not
// visible in this chunk.)
167 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
169 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
171 setOperationAction(ISD::LOAD, VT, Custom);
172 setOperationAction(ISD::STORE, VT, Custom);
173 setLoadExtAction(ISD::EXTLOAD, VT, Custom);
174 setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
175 setLoadExtAction(ISD::SEXTLOAD, VT, Custom);
// Expand truncating stores from VT to every narrower integer type.
177 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
178 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
179 setTruncStoreAction(VT, StoreVT, Expand);
// Same treatment for the floating-point types f32..f64.
183 for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
185 MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;
187 setOperationAction(ISD::LOAD, VT, Custom);
188 setOperationAction(ISD::STORE, VT, Custom);
190 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
191 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
192 setTruncStoreAction(VT, StoreVT, Expand);
196 // Expand the jumptable branches
197 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
198 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
200 // Custom lower SELECT_CC for most cases, but expand by default
201 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
202 setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
203 setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
204 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
205 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
207 // SPU has no intrinsics for these particular operations:
208 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
210 // SPU has no division/remainder instructions
211 setOperationAction(ISD::SREM, MVT::i8, Expand);
212 setOperationAction(ISD::UREM, MVT::i8, Expand);
213 setOperationAction(ISD::SDIV, MVT::i8, Expand);
214 setOperationAction(ISD::UDIV, MVT::i8, Expand);
215 setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
216 setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
217 setOperationAction(ISD::SREM, MVT::i16, Expand);
218 setOperationAction(ISD::UREM, MVT::i16, Expand);
219 setOperationAction(ISD::SDIV, MVT::i16, Expand);
220 setOperationAction(ISD::UDIV, MVT::i16, Expand);
221 setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
222 setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
223 setOperationAction(ISD::SREM, MVT::i32, Expand);
224 setOperationAction(ISD::UREM, MVT::i32, Expand);
225 setOperationAction(ISD::SDIV, MVT::i32, Expand);
226 setOperationAction(ISD::UDIV, MVT::i32, Expand);
227 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
228 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
229 setOperationAction(ISD::SREM, MVT::i64, Expand);
230 setOperationAction(ISD::UREM, MVT::i64, Expand);
231 setOperationAction(ISD::SDIV, MVT::i64, Expand);
232 setOperationAction(ISD::UDIV, MVT::i64, Expand);
233 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
234 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
235 setOperationAction(ISD::SREM, MVT::i128, Expand);
236 setOperationAction(ISD::UREM, MVT::i128, Expand);
237 setOperationAction(ISD::SDIV, MVT::i128, Expand);
238 setOperationAction(ISD::UDIV, MVT::i128, Expand);
239 setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
240 setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
242 // We don't support sin/cos/sqrt/fmod
243 setOperationAction(ISD::FSIN , MVT::f64, Expand);
244 setOperationAction(ISD::FCOS , MVT::f64, Expand);
245 setOperationAction(ISD::FREM , MVT::f64, Expand);
246 setOperationAction(ISD::FSIN , MVT::f32, Expand);
247 setOperationAction(ISD::FCOS , MVT::f32, Expand);
248 setOperationAction(ISD::FREM , MVT::f32, Expand);
250 // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
252 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
253 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
255 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
256 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
258 // SPU can do rotate right and left, so legalize it... but customize for i8
259 // because instructions don't exist.
261 // FIXME: Change from "expand" to appropriate type once ROTR is supported in
263 setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
264 setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
265 setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);
267 setOperationAction(ISD::ROTL, MVT::i32, Legal);
268 setOperationAction(ISD::ROTL, MVT::i16, Legal);
269 setOperationAction(ISD::ROTL, MVT::i8, Custom);
271 // SPU has no native version of shift left/right for i8
272 setOperationAction(ISD::SHL, MVT::i8, Custom);
273 setOperationAction(ISD::SRL, MVT::i8, Custom);
274 setOperationAction(ISD::SRA, MVT::i8, Custom);
276 // Make these operations legal and handle them during instruction selection:
277 setOperationAction(ISD::SHL, MVT::i64, Legal);
278 setOperationAction(ISD::SRL, MVT::i64, Legal);
279 setOperationAction(ISD::SRA, MVT::i64, Legal);
281 // Custom lower i8, i32 and i64 multiplications
282 setOperationAction(ISD::MUL, MVT::i8, Custom);
283 setOperationAction(ISD::MUL, MVT::i32, Legal);
284 setOperationAction(ISD::MUL, MVT::i64, Legal);
286 // Expand double-width multiplication
287 // FIXME: It would probably be reasonable to support some of these operations
288 setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
289 setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
290 setOperationAction(ISD::MULHU, MVT::i8, Expand);
291 setOperationAction(ISD::MULHS, MVT::i8, Expand);
292 setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
293 setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
294 setOperationAction(ISD::MULHU, MVT::i16, Expand);
295 setOperationAction(ISD::MULHS, MVT::i16, Expand);
296 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
297 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
298 setOperationAction(ISD::MULHU, MVT::i32, Expand);
299 setOperationAction(ISD::MULHS, MVT::i32, Expand);
300 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
301 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
302 setOperationAction(ISD::MULHU, MVT::i64, Expand);
303 setOperationAction(ISD::MULHS, MVT::i64, Expand);
305 // Need to custom handle (some) common i8, i64 math ops
306 setOperationAction(ISD::ADD, MVT::i8, Custom);
307 setOperationAction(ISD::ADD, MVT::i64, Legal);
308 setOperationAction(ISD::SUB, MVT::i8, Custom);
309 setOperationAction(ISD::SUB, MVT::i64, Legal);
311 // SPU does not have BSWAP. It does have i32 support CTLZ.
312 // CTPOP has to be custom lowered.
313 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
314 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
316 setOperationAction(ISD::CTPOP, MVT::i8, Custom);
317 setOperationAction(ISD::CTPOP, MVT::i16, Custom);
318 setOperationAction(ISD::CTPOP, MVT::i32, Custom);
319 setOperationAction(ISD::CTPOP, MVT::i64, Custom);
320 setOperationAction(ISD::CTPOP, MVT::i128, Expand);
322 setOperationAction(ISD::CTTZ , MVT::i8, Expand);
323 setOperationAction(ISD::CTTZ , MVT::i16, Expand);
324 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
325 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
326 setOperationAction(ISD::CTTZ , MVT::i128, Expand);
328 setOperationAction(ISD::CTLZ , MVT::i8, Promote);
329 setOperationAction(ISD::CTLZ , MVT::i16, Promote);
330 setOperationAction(ISD::CTLZ , MVT::i32, Legal);
331 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
332 setOperationAction(ISD::CTLZ , MVT::i128, Expand);
334 // SPU has a version of select that implements (a&~c)|(b&c), just like
335 // select ought to work:
336 setOperationAction(ISD::SELECT, MVT::i8, Legal);
337 setOperationAction(ISD::SELECT, MVT::i16, Legal);
338 setOperationAction(ISD::SELECT, MVT::i32, Legal);
339 setOperationAction(ISD::SELECT, MVT::i64, Legal);
341 setOperationAction(ISD::SETCC, MVT::i8, Legal);
342 setOperationAction(ISD::SETCC, MVT::i16, Legal);
343 setOperationAction(ISD::SETCC, MVT::i32, Legal);
344 setOperationAction(ISD::SETCC, MVT::i64, Legal);
345 setOperationAction(ISD::SETCC, MVT::f64, Custom);
347 // Custom lower i128 -> i64 truncates
348 setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);
350 // Custom lower i32/i64 -> i128 sign extend
351 setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);
353 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
354 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
355 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
356 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
357 // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
358 // to expand to a libcall, hence the custom lowering:
359 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
360 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
361 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
362 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
363 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
364 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
366 // FDIV on SPU requires custom lowering
367 setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall
369 // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
370 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
371 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
372 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
373 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
374 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
375 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
376 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
377 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
379 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
380 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
381 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
382 setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);
384 // We cannot sextinreg(i1). Expand to shifts.
385 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
387 // We want to legalize GlobalAddress and ConstantPool nodes into the
388 // appropriate instructions to materialize the address.
// NOTE(review): later lines 411-415 re-set FP_TO_SINT/SINT_TO_FP for i64 and
// FP_TO_UINT for i32, overriding the Expand/Custom choices made above (last
// call wins in TargetLowering) — intentional per the comments, but worth
// confirming against upstream history.
389 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
391 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
393 setOperationAction(ISD::GlobalAddress, VT, Custom);
394 setOperationAction(ISD::ConstantPool, VT, Custom);
395 setOperationAction(ISD::JumpTable, VT, Custom);
398 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
399 setOperationAction(ISD::VASTART , MVT::Other, Custom);
401 // Use the default implementation.
402 setOperationAction(ISD::VAARG , MVT::Other, Expand);
403 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
404 setOperationAction(ISD::VAEND , MVT::Other, Expand);
405 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
406 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
407 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
408 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);
410 // Cell SPU has instructions for converting between i64 and fp.
411 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
412 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
414 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
415 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
417 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
418 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
420 // First set operation action for all vector types to expand. Then we
421 // will selectively turn on ones that can be effectively codegen'd.
422 addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
423 addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
424 addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
425 addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
426 addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
427 addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
429 // "Odd size" vector classes that we're willing to support:
430 addRegisterClass(MVT::v2i32, SPU::VECREGRegisterClass);
// Blanket per-vector-type configuration.
432 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
433 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
434 MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
436 // add/sub are legal for all supported vector VT's.
437 setOperationAction(ISD::ADD, VT, Legal);
438 setOperationAction(ISD::SUB, VT, Legal);
439 // mul has to be custom lowered.
// NOTE(review): comment says "custom lowered" but the action set is Legal —
// possibly stale comment; confirm against upstream.
440 setOperationAction(ISD::MUL, VT, Legal);
442 setOperationAction(ISD::AND, VT, Legal);
443 setOperationAction(ISD::OR, VT, Legal);
444 setOperationAction(ISD::XOR, VT, Legal);
445 setOperationAction(ISD::LOAD, VT, Legal);
446 setOperationAction(ISD::SELECT, VT, Legal);
447 setOperationAction(ISD::STORE, VT, Legal);
449 // These operations need to be expanded:
450 setOperationAction(ISD::SDIV, VT, Expand);
451 setOperationAction(ISD::SREM, VT, Expand);
452 setOperationAction(ISD::UDIV, VT, Expand);
453 setOperationAction(ISD::UREM, VT, Expand);
455 // Custom lower build_vector, constant pool spills, insert and
456 // extract vector elements:
457 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
458 setOperationAction(ISD::ConstantPool, VT, Custom);
459 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
460 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
461 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
462 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
// v16i8 logic ops and v4f32 scalar_to_vector get special handling.
465 setOperationAction(ISD::AND, MVT::v16i8, Custom);
466 setOperationAction(ISD::OR, MVT::v16i8, Custom);
467 setOperationAction(ISD::XOR, MVT::v16i8, Custom);
468 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
470 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
472 setShiftAmountType(MVT::i32);
// SETCC produces all-ones (-1) for true, matching the SPU select idiom above.
473 setBooleanContents(ZeroOrNegativeOneBooleanContent);
475 setStackPointerRegisterToSaveRestore(SPU::R1);
477 // We have target-specific dag combine patterns for the following nodes:
478 setTargetDAGCombine(ISD::ADD);
479 setTargetDAGCombine(ISD::ZERO_EXTEND);
480 setTargetDAGCombine(ISD::SIGN_EXTEND);
481 setTargetDAGCombine(ISD::ANY_EXTEND);
483 computeRegisterProperties();
485 // Set pre-RA register scheduler default to BURR, which produces slightly
486 // better code than the default (could also be TDRR, but TargetLowering.h
487 // needs a mod to support that model):
488 setSchedulingPreference(Sched::RegPressure);
// Returns the printable name for an SPUISD target node opcode, or 0 if the
// opcode is unknown. Populates the file-scope node_names map on first call.
// The return-type line and braces are not visible in this chunk.
492 SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
// One-time lazy initialization of the opcode -> name table.
494 if (node_names.empty()) {
495 node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
496 node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
497 node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
498 node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
499 node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
500 node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
501 node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
502 node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
503 node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
504 node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
505 node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
506 node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
507 node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
508 node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
509 node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
510 node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
511 node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
512 node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
513 node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
514 "SPUISD::ROTBYTES_LEFT_BITS";
515 node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
516 node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
517 node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
518 node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
519 node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
// Lookup; null for opcodes not registered above.
522 std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
524 return ((i != node_names.end()) ? i->second : 0);
527 /// getFunctionAlignment - Return the Log2 alignment of this function.
// Body (the returned constant) is not visible in this chunk.
528 unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
532 //===----------------------------------------------------------------------===//
533 // Return the Cell SPU's SETCC result type
534 //===----------------------------------------------------------------------===//
// For i8/i16/i32 operands, SETCC produces a result of the same width; the
// fallback value for other types is on a line not visible in this chunk.
536 MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
537 // i16 and i32 are valid SETCC result types
538 return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
539 VT.getSimpleVT().SimpleTy :
543 //===----------------------------------------------------------------------===//
544 // Calling convention code:
545 //===----------------------------------------------------------------------===//
547 #include "SPUGenCallingConv.inc"
549 //===----------------------------------------------------------------------===//
550 // LowerOperation implementation
551 //===----------------------------------------------------------------------===//
553 /// Custom lower loads for CellSPU
555 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
556 within a 16-byte block, we have to rotate to extract the requested element.
558 For extending loads, we also want to ensure that the following sequence is
559 emitted, e.g. for MVT::f32 extending load to MVT::f64:
563 %2 v16i8,ch = rotate %1
564 %3 v4f8, ch = bitconvert %2
565 %4 f32 = vec2perfslot %3
566 %5 f64 = fp_extend %4
// Strategy: load the whole aligned 16-byte quadword as v16i8, rotate the
// wanted element into the value type's preferred slot, extract it with
// VEC2PREFSLOT, apply any sign/zero/fp extension, then wrap in LDRESULT so
// the chain is preserved. Declarations of 'CN', 'rotate', 'result', 'Flag'
// and several closing braces are on lines not visible in this chunk.
570 LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
571 LoadSDNode *LN = cast<LoadSDNode>(Op);
572 SDValue the_chain = LN->getChain();
573 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
574 EVT InVT = LN->getMemoryVT();
575 EVT OutVT = Op.getValueType();
576 ISD::LoadExtType ExtType = LN->getExtensionType();
577 unsigned alignment = LN->getAlignment();
// Fatal if InVT has no valtype_map entry (getValueTypeMapEntry aborts).
578 const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
579 DebugLoc dl = Op.getDebugLoc();
581 switch (LN->getAddressingMode()) {
582 case ISD::UNINDEXED: {
584 SDValue basePtr = LN->getBasePtr();
// 16-byte-aligned case: rotation amount can often be computed statically.
587 if (alignment == 16) {
590 // Special cases for a known aligned load to simplify the base pointer
591 // and the rotation amount:
592 if (basePtr.getOpcode() == ISD::ADD
593 && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
594 // Known offset into basePtr
595 int64_t offset = CN->getSExtValue();
// Rotate by (offset within quadword) minus the preferred-slot byte.
596 int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);
601 rotate = DAG.getConstant(rotamt, MVT::i16);
603 // Simplify the base pointer for this case:
604 basePtr = basePtr.getOperand(0);
// Fold the 16-byte-aligned part of the offset back into the address.
605 if ((offset & ~0xf) > 0) {
606 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
608 DAG.getConstant((offset & ~0xf), PtrVT));
610 } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
611 || (basePtr.getOpcode() == SPUISD::IndirectAddr
612 && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
613 && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
614 // Plain aligned a-form address: rotate into preferred slot
615 // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
616 int64_t rotamt = -vtm->prefslot_byte;
619 rotate = DAG.getConstant(rotamt, MVT::i16);
621 // Offset the rotate amount by the basePtr and the preferred slot
623 int64_t rotamt = -vtm->prefslot_byte;
626 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
628 DAG.getConstant(rotamt, PtrVT));
// Unaligned (or unknown-alignment) case.
631 // Unaligned load: must be more pessimistic about addressing modes:
632 if (basePtr.getOpcode() == ISD::ADD) {
633 MachineFunction &MF = DAG.getMachineFunction();
634 MachineRegisterInfo &RegInfo = MF.getRegInfo();
635 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
638 SDValue Op0 = basePtr.getOperand(0);
639 SDValue Op1 = basePtr.getOperand(1);
641 if (isa<ConstantSDNode>(Op1)) {
642 // Convert the (add <ptr>, <const>) to an indirect address contained
643 // in a register. Note that this is done because we need to avoid
644 // creating a 0(reg) d-form address due to the SPU's block loads.
645 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
// Materialize the address into VReg so it is a plain register operand.
646 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
647 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
649 // Convert the (add <arg1>, <arg2>) to an indirect address, which
650 // will likely be lowered as a reg(reg) x-form address.
651 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
// Fallback: wrap basePtr as (IndirectAddr base, 0).
654 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
656 DAG.getConstant(0, PtrVT));
659 // Offset the rotate amount by the basePtr and the preferred slot
// Rotation amount must be computed at runtime from the pointer value.
661 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
663 DAG.getConstant(-vtm->prefslot_byte, PtrVT));
666 // Re-emit as a v16i8 vector load
667 result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
668 LN->getSrcValue(), LN->getSrcValueOffset(),
669 LN->isVolatile(), LN->isNonTemporal(), 16);
// Thread the new load's output chain through.
672 the_chain = result.getValue(1);
674 // Rotate into the preferred slot:
675 result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
676 result.getValue(0), rotate);
678 // Convert the loaded v16i8 vector to the appropriate vector type
679 // specified by the operand:
680 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
681 InVT, (128 / InVT.getSizeInBits()));
682 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
683 DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));
685 // Handle extending loads by extending the scalar result:
686 if (ExtType == ISD::SEXTLOAD) {
687 result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
688 } else if (ExtType == ISD::ZEXTLOAD) {
689 result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
690 } else if (ExtType == ISD::EXTLOAD) {
691 unsigned NewOpc = ISD::ANY_EXTEND;
// Anyext for integers, fp_extend for floating-point extloads (see the
// f32->f64 sequence in the header comment).
693 if (OutVT.isFloatingPoint())
694 NewOpc = ISD::FP_EXTEND;
696 result = DAG.getNode(NewOpc, dl, OutVT, result);
// Package (value, chain) as an LDRESULT node so both results survive.
699 SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
700 SDValue retops[2] = {
705 result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
706 retops, sizeof(retops) / sizeof(retops[0]));
// Indexed (pre/post-inc) loads are not supported: hard error.
713 case ISD::LAST_INDEXED_MODE:
715 report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
717 Twine((unsigned)LN->getAddressingMode()));
725 /// Custom lower stores for CellSPU
727 All CellSPU stores are aligned to 16-byte boundaries, so for elements
728 within a 16-byte block, we have to generate a shuffle to insert the
729 requested element into its place, then store the resulting block.
// Strategy (read-modify-write): load the containing 16-byte quadword, build a
// SHUFFLE_MASK for the insertion byte offset, SHUFB the scalar into the
// loaded quadword, then store the merged quadword back. Declarations of
// 'CN', 'result', 'Flag' and several closing braces are on lines not visible
// in this chunk.
732 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
733 StoreSDNode *SN = cast<StoreSDNode>(Op);
734 SDValue Value = SN->getValue();
735 EVT VT = Value.getValueType();
// For truncating stores, operate on the narrower memory type.
736 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
737 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
738 DebugLoc dl = Op.getDebugLoc();
739 unsigned alignment = SN->getAlignment();
741 switch (SN->getAddressingMode()) {
742 case ISD::UNINDEXED: {
743 // The vector type we really want to load from the 16-byte chunk.
744 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
745 VT, (128 / VT.getSizeInBits()));
747 SDValue alignLoadVec;
748 SDValue basePtr = SN->getBasePtr();
749 SDValue the_chain = SN->getChain();
// insertEltOffs: byte address (within the quadword) where the scalar lands.
750 SDValue insertEltOffs;
752 if (alignment == 16) {
755 // Special cases for a known aligned load to simplify the base pointer
756 // and insertion byte:
757 if (basePtr.getOpcode() == ISD::ADD
758 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
759 // Known offset into basePtr
760 int64_t offset = CN->getSExtValue();
762 // Simplify the base pointer for this case:
763 basePtr = basePtr.getOperand(0);
// Low nibble of the offset selects the insertion byte within the quad.
764 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
766 DAG.getConstant((offset & 0xf), PtrVT));
// Fold the 16-byte-aligned part of the offset back into the address.
768 if ((offset & ~0xf) > 0) {
769 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
771 DAG.getConstant((offset & ~0xf), PtrVT));
774 // Otherwise, assume it's at byte 0 of basePtr
775 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
777 DAG.getConstant(0, PtrVT));
// Unaligned (or unknown-alignment) case — same pessimistic addressing as
// LowerLOAD.
780 // Unaligned load: must be more pessimistic about addressing modes:
781 if (basePtr.getOpcode() == ISD::ADD) {
782 MachineFunction &MF = DAG.getMachineFunction();
783 MachineRegisterInfo &RegInfo = MF.getRegInfo();
784 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
787 SDValue Op0 = basePtr.getOperand(0);
788 SDValue Op1 = basePtr.getOperand(1);
790 if (isa<ConstantSDNode>(Op1)) {
791 // Convert the (add <ptr>, <const>) to an indirect address contained
792 // in a register. Note that this is done because we need to avoid
793 // creating a 0(reg) d-form address due to the SPU's block loads.
794 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
795 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
796 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
798 // Convert the (add <arg1>, <arg2>) to an indirect address, which
799 // will likely be lowered as a reg(reg) x-form address.
800 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
// Fallback: wrap basePtr as (IndirectAddr base, 0).
803 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
805 DAG.getConstant(0, PtrVT));
808 // Insertion point is solely determined by basePtr's contents
809 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
811 DAG.getConstant(0, PtrVT));
814 // Re-emit as a v16i8 vector load
// Load the quadword that will receive the stored element.
815 alignLoadVec = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
816 SN->getSrcValue(), SN->getSrcValueOffset(),
817 SN->isVolatile(), SN->isNonTemporal(), 16);
820 the_chain = alignLoadVec.getValue(1);
822 LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
823 SDValue theValue = SN->getValue();
// Strip AssertZext/AssertSext wrappers to reach the raw value being stored.
827 && (theValue.getOpcode() == ISD::AssertZext
828 || theValue.getOpcode() == ISD::AssertSext)) {
829 // Drill down and get the value for zero- and sign-extended
831 theValue = theValue.getOperand(0);
834 // If the base pointer is already a D-form address, then just create
835 // a new D-form address with a slot offset and the orignal base pointer.
836 // Otherwise generate a D-form address with the slot offset relative
837 // to the stack pointer, which is always aligned.
839 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
840 errs() << "CellSPU LowerSTORE: basePtr = ";
841 basePtr.getNode()->dump(&DAG);
// Build the shuffle mask for the insertion offset, vectorize the scalar,
// and merge it into the loaded quadword via SHUFB.
846 SDValue insertEltOp =
847 DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, insertEltOffs);
848 SDValue vectorizeOp =
849 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, theValue);
851 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
852 vectorizeOp, alignLoadVec,
853 DAG.getNode(ISD::BIT_CONVERT, dl,
854 MVT::v4i32, insertEltOp));
// Write the merged quadword back, reusing the alignment load's memory info.
856 result = DAG.getStore(the_chain, dl, result, basePtr,
857 LN->getSrcValue(), LN->getSrcValueOffset(),
858 LN->isVolatile(), LN->isNonTemporal(),
// Disabled debug dump follows (#if 0 block).
861 #if 0 && !defined(NDEBUG)
862 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
// NOTE(review): "¤tRoot" below is mis-encoded text — almost certainly
// "&currentRoot" mangled through the HTML entity "&curren;". Dead code
// (#if 0), left byte-identical here; fix when restoring this block.
863 const SDValue &currentRoot = DAG.getRoot();
866 errs() << "------- CellSPU:LowerStore result:\n";
868 errs() << "-------\n";
869 DAG.setRoot(currentRoot);
// Indexed (pre/post-inc) stores are not supported: hard error.
880 case ISD::LAST_INDEXED_MODE:
// NOTE(review): message says "LowerLOAD"/"LoadSDNode" but this is LowerSTORE
// operating on a StoreSDNode — copy-paste in the diagnostic text; confirm
// and correct upstream (string kept byte-identical in this doc-only edit).
882 report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
884 Twine((unsigned)SN->getAddressingMode()));
//! Generate the address of a constant pool entry.
// Static relocation model only: emits an A-form address in small-memory mode,
// otherwise a Hi/Lo pair combined through an SPU indirect address node.
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      // Just return the SDValue with the constant pool address in it.
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
      // NOTE(review): the large-memory Hi/Lo path below is unreachable as
      // shown; an intervening "} else {" appears to have been lost in
      // extraction — confirm against the original source.
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);

  // Non-static relocation models are not supported for CellSPU.
  llvm_unreachable("LowerConstantPool: Relocation model other than static"
//! Alternate entry point for generating the address of a constant pool entry
// Public wrapper: forwards to the file-local LowerConstantPool above, pulling
// the subtarget out of the target machine.
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
  return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
// Generate the address of a jump table entry: A-form address in small-memory
// mode, Hi/Lo indirect address in large-memory mode. Static relocation only.
LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
      // NOTE(review): unreachable as shown; an intervening "} else {" for the
      // large-memory path appears to have been lost in extraction.
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);

  llvm_unreachable("LowerJumpTable: Relocation model other than static"
// Generate the address of a global: A-form address in small-memory mode,
// Hi/Lo indirect address in large-memory mode. Static relocation only.
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  const TargetMachine &TM = DAG.getTarget();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
      // NOTE(review): unreachable as shown; an intervening "} else {" for the
      // large-memory path appears to have been lost in extraction.
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);

  report_fatal_error("LowerGlobalAddress: Relocation model other than static"
//! Custom lower double precision floating point constants
// Materializes an f64 constant by splatting its raw 64-bit pattern into a
// v2i64 BUILD_VECTOR, bitcasting to v2f64, and extracting the preferred slot.
LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (VT == MVT::f64) {
    ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
    // NOTE(review): the head of an assert() covering the string below appears
    // to have been lost in extraction.
           "LowerConstantFP: Node is not ConstantFPSDNode");

    uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
    SDValue T = DAG.getConstant(dbits, MVT::i64);
    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                       DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
// Lower incoming formal arguments. Arguments that fit in the SPU argument
// registers are copied out of registers into virtual registers; once the
// argument registers are exhausted, arguments are loaded from fixed stack
// slots (StackSlotSize apart, starting at minStackSize()). For varargs
// functions, the remaining argument registers are spilled to fixed frame
// slots so va_arg can walk them.
SPUTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();

  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  unsigned ArgOffset = SPUFrameInfo::minStackSize();
  unsigned ArgRegIdx = 0;
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Add DAG nodes to load the arguments or copy them out of registers.
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;

    if (ArgRegIdx < NumArgRegs) {
      const TargetRegisterClass *ArgRegClass;

      // Pick the register class matching the argument's value type.
      // NOTE(review): the case labels of this switch (the MVT tags for each
      // register class) appear to have been lost in extraction.
      switch (ObjectVT.getSimpleVT().SimpleTy) {
        report_fatal_error("LowerFormalArguments Unhandled argument type: " +
                           Twine(ObjectVT.getEVTString()));
        ArgRegClass = &SPU::R8CRegClass;
        ArgRegClass = &SPU::R16CRegClass;
        ArgRegClass = &SPU::R32CRegClass;
        ArgRegClass = &SPU::R64CRegClass;
        ArgRegClass = &SPU::GPRCRegClass;
        ArgRegClass = &SPU::R32FPRegClass;
        ArgRegClass = &SPU::R64FPRegClass;
        ArgRegClass = &SPU::VECREGRegClass;

      // Argument arrives in a register: copy it into a fresh virtual register.
      unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
      RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
      ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);

      // We need to load the argument to a virtual register if we determined
      // above that we ran out of physical registers of the appropriate type
      // or we're forced to do vararg
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, false, false, 0);
      ArgOffset += StackSlotSize;

    InVals.push_back(ArgVal);
    Chain = ArgVal.getOperand(0);

  // Varargs handling: spill the remaining argument registers to fixed frame
  // slots so the va_arg machinery can find them.
  // unsigned int ptr_size = PtrVT.getSizeInBits() / 8;
  // We will spill (79-3)+1 registers to the stack
  SmallVector<SDValue, 79-3+1> MemOps;

  // Create the frame slot
  for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
    FuncInfo->setVarArgsFrameIndex(
      MFI->CreateFixedObject(StackSlotSize, ArgOffset,
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
    SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
    SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0,
    Chain = Store.getOperand(0);
    MemOps.push_back(Store);

    // Increment address by stack slot size for the next stored argument
    ArgOffset += StackSlotSize;

  // Glue all the varargs spill stores together into a single token factor.
  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOps[0], MemOps.size());
/// isLSAAddress - Return the immediate to use if the specified
/// value is representable as a LSA address.
// Requires a constant whose low 2 bits are zero (word aligned) and whose
// value sign-extends from 18 bits; the returned node holds the address >> 2.
static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  // NOTE(review): a null check on C (dyn_cast can fail) appears to have been
  // lost in extraction — confirm against the original source.

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
      (Addr << 14 >> 14) != Addr)
    return 0; // Top 14 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
// Lower an outgoing call: arguments go into the SPU argument registers until
// those run out, then into stack slots below the stack pointer (R1). The
// callee address is rewritten into the appropriate SPU addressing form
// (A-form / PC-relative / indirect) depending on memory model, the call node
// is emitted between CALLSEQ_START/END, and return values are copied out of
// R3 (and R4 for a second i32 result).
SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  // CellSPU target does not yet support tail call optimization.

  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  unsigned NumOps = Outs.size();
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  // Handy pointer type
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
  unsigned ArgRegIdx = 0;

  // Keep track of registers passing arguments
  std::vector<std::pair<unsigned, SDValue> > RegsToPass;
  // And the arguments passed on the stack
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = Outs[i].Val;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    // Dispatch on the argument's value type; each group either grabs the next
    // argument register or spills to the next stack slot.
    // NOTE(review): the case labels of this switch appear to have been lost
    // in extraction — the three identical bodies below correspond to distinct
    // groups of MVT cases in the original.
    switch (Arg.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
        ArgOffset += StackSlotSize;
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
        ArgOffset += StackSlotSize;
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
        ArgOffset += StackSlotSize;

  // Accumulate how many bytes are to be pushed on the stack, including the
  // linkage area, and parameter passing area.  According to the SPU ABI,
  // we minimally need space for [LR] and [SP].
  unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize();

  // Insert a call sequence start
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,

  if (!MemOpChains.empty()) {
    // Adjust the stack pointer for the stack arguments.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);

  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = SPUISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT);

    if (!ST->usingLargeMem()) {
      // Turn calls to targets that are defined (i.e., have bodies) into BRSL
      // style calls, otherwise, external symbols are BRASL calls. This assumes
      // that declared/defined symbols are in the same compilation unit and can
      // be reached through PC-relative jumps.
      // This may be an unsafe assumption for JIT and really large compilation
      if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
        // NOTE(review): an "} else {" separating the declaration and defined
        // cases appears to have been lost in extraction.
        Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);

      // "Large memory" mode: Turn all calls into indirect calls with a X-form
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
                                                 Callee.getValueType());

    if (!ST->usingLargeMem()) {
      Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
  } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
    // If this is an absolute destination address that appears to be a legal
    // local store address, use the munged value.
    Callee = SDValue(Dest, 0);

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  InFlag = Chain.getValue(1);

  // If the function returns void, just return the chain.

  // If the call has results, copy the values out of the ret val registers.
  // NOTE(review): several case labels of this switch appear to have been
  // lost in extraction.
  switch (Ins[0].VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected ret value!");
  case MVT::Other: break;
    // Two i32 results: second comes back in R4, first in R3.
    if (Ins.size() > 1 && Ins[1].VT == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
                                 MVT::i32, InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      InVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));

    // Single result of any other type comes back in R3.
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
                               InFlag).getValue(1);
    InVals.push_back(Chain.getValue(0));
// Lower a function return: run the SPU return calling convention over the
// outgoing values, mark the chosen registers live-out, copy each value into
// its assigned register, and emit an SPU RET_FLAG node.
SPUTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               DebugLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_SPU);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
    Flag = Chain.getValue(1);

  // Emit the return with or without the glue operand depending on whether any
  // value copies were generated.
  return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1390 //===----------------------------------------------------------------------===//
1391 // Vector related lowering:
1392 //===----------------------------------------------------------------------===//
// Return the BUILD_VECTOR's single repeated constant operand, if the vector
// is a uniform splat (ignoring undef lanes); used by the get_vec_* helpers.
static ConstantSDNode *
getVecImm(SDNode *N) {
  SDValue OpVal(0, 0);

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))

  if (OpVal.getNode() != 0) {
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
/// get_vec_u18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
// value as a target constant. For i64 splats, both 32-bit halves must match
// (the halved value is checked against the 18-bit range).
SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      // NOTE(review): a check that upper == lower appears to have been lost
      // in extraction before the halving below.
      Value = Value >> 32;
    if (Value <= 0x3ffff)
      return DAG.getTargetConstant(Value, ValueType);
/// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
// value as a target constant. For i64 splats, both 32-bit halves must match.
SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      // NOTE(review): a check that upper == lower appears to have been lost
      // in extraction before the halving below.
      Value = Value >> 32;
    // Signed 16-bit range: [-32768, 32767].
    if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
      return DAG.getTargetConstant(Value, ValueType);
/// get_vec_i10imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 10-bit constant, and if so, return the
// value as a target constant. For i64 splats, both 32-bit halves must match.
SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      // NOTE(review): a check that upper == lower appears to have been lost
      // in extraction before the halving below.
      Value = Value >> 32;
    if (isInt<10>(Value))
      return DAG.getTargetConstant(Value, ValueType);
/// get_vec_i8imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 8-bit constant, and if so, return the
/// @note: The incoming vector is v16i8 because that's the only way we can load
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
// same: for an i16 splat, both bytes of the halfword must be equal before the
// low byte is returned as the constant.
SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    int Value = (int) CN->getZExtValue();
    if (ValueType == MVT::i16
        && Value <= 0xffff /* truncated from uint64_t */
        && ((short) Value >> 8) == ((short) Value & 0xff))
      return DAG.getTargetConstant(Value & 0xff, ValueType);
    else if (ValueType == MVT::i8
             && (Value & 0xff) == Value)
      return DAG.getTargetConstant(Value, ValueType);
/// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
// value shifted down 16 bits — i.e., the immediate an ILHU (immediate load
// halfword upper) would use. Requires the low 16 bits of the splat be zero.
SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if ((ValueType == MVT::i32
         && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
        || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
      return DAG.getTargetConstant(Value >> 16, ValueType);
/// get_v4i32_imm - Catch-all for general 32-bit constant vectors
// Returns the splat value as an i32 target constant when N is a uniform
// constant splat (see getVecImm).
SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1531 /// get_v4i32_imm - Catch-all for general 64-bit constant vectors
1532 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1533 if (ConstantSDNode *CN = getVecImm(N)) {
1534 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
//! Lower a BUILD_VECTOR instruction creatively:
// Only uniform constant splats are custom-lowered here; anything else falls
// through (empty SDValue) to the default expansion. Each vector type has its
// own materialization strategy; v2i64 defers to SPU::LowerV2I64Splat.
LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();
  BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
  unsigned minSplatBits = EltVT.getSizeInBits();

  // Splats narrower than 16 bits are widened to 16 before matching.
  if (minSplatBits < 16)

  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;

  if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, minSplatBits)
      || minSplatBits < SplatBitSize)
    return SDValue(); // Wasn't a constant vector or splat exceeded min

  uint64_t SplatBits = APSplatBits.getZExtValue();

  // NOTE(review): the case labels of this switch (one per vector MVT) appear
  // to have been lost in extraction.
  switch (VT.getSimpleVT().SimpleTy) {
    report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
                       Twine(VT.getEVTString()));
    // v4f32: splat the 32-bit pattern as v4i32 and bitcast.
    uint32_t Value32 = uint32_t(SplatBits);
    assert(SplatBitSize == 32
           && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(Value32, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
    // v2f64: splat the 64-bit pattern as v2i64 and bitcast.
    uint64_t f64val = uint64_t(SplatBits);
    assert(SplatBitSize == 64
           && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(f64val, MVT::i64);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
    // v16i8 via a v8i16 splat:
    // 8-bit constants have to be expanded to 16-bits
    unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
      DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
    // v8i16:
    unsigned short Value16 = SplatBits;
    SDValue T = DAG.getConstant(Value16, EltVT);
    SmallVector<SDValue, 8> Ops;

    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    // v4i32:
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
    // 2-element vector:
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T);
    // v2i64 splats get special handling:
    return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
// Lower a v2i64 splat of SplatVal. If both 32-bit halves are equal, a v4i32
// splat (matchable by IL/ILA) suffices. If both halves are "special" byte
// patterns (0, 0xffffffff, 0x80000000), emit a plain BUILD_VECTOR (constant
// pool load). Otherwise synthesize the value with a SHUFB whose mask encodes
// the special bytes directly.
SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
  uint32_t upper = uint32_t(SplatVal >> 32);
  uint32_t lower = uint32_t(SplatVal);

  if (upper == lower) {
    // Magic constant that can be matched by IL, ILA, et. al.
    SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   Val, Val, Val, Val));
    bool upper_special, lower_special;

    // NOTE: This code creates common-case shuffle masks that can be easily
    // detected as common expressions. It is not attempting to create highly
    // specialized masks to replace any and all 0's, 0xff's and 0x80's.

    // Detect if the upper or lower half is a special shuffle mask pattern:
    upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
    lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);

    // Both upper and lower are special, lower to a constant pool load:
    if (lower_special && upper_special) {
      SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
      return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
                         SplatValCN, SplatValCN);

    SmallVector<SDValue, 16> ShufBytes;

    // Create lower vector if not a special pattern
    if (!lower_special) {
      SDValue LO32C = DAG.getConstant(lower, MVT::i32);
      LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     LO32C, LO32C, LO32C, LO32C));

    // Create upper vector if not a special pattern
    if (!upper_special) {
      SDValue HI32C = DAG.getConstant(upper, MVT::i32);
      HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     HI32C, HI32C, HI32C, HI32C));

    // If either upper or lower are special, then the two input operands are
    // the same (basically, one of them is a "don't care")

    // Build the 16-byte SHUFB control word, one 32-bit mask word per loop of
    // i; even words come from the upper half, odd words from the lower.
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        bool process_upper, process_lower;

        process_upper = (upper_special && (i & 1) == 0);
        process_lower = (lower_special && (i & 1) == 1);

        if (process_upper || process_lower) {
          // Special bytes map to SHUFB's generator codes:
          // 0x80 -> 0x00, 0xc0 -> 0xff, 0xe0 -> 0x80 (first byte only).
          if ((process_upper && upper == 0)
              || (process_lower && lower == 0))
          else if ((process_upper && upper == 0xffffffff)
                   || (process_lower && lower == 0xffffffff))
          else if ((process_upper && upper == 0x80000000)
                   || (process_lower && lower == 0x80000000))
            val |= (j == 0 ? 0xe0 : 0x80);

          // Non-special byte: select directly from the HI32/LO32 operands.
          val |= i * 4 + j + ((i & 1) * 16);

        ShufBytes.push_back(DAG.getConstant(val, MVT::i32));

    return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   &ShufBytes[0], ShufBytes.size()));
/// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
/// which the Cell can operate. The code inspects V3 to ascertain whether the
/// permutation vector, V3, is monotonically increasing with one "exception"
/// element, e.g., (0, 1, _, 3). If this is the case, then generate a
/// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
/// In either case, the net result is going to eventually invoke SHUFB to
/// permute/shuffle the bytes from V1 and V2.
/// SHUFFLE_MASK is eventually selected as one of the C*D instructions, generate
/// control word for byte/halfword/word insertion. This takes care of a single
/// element move from V2 into V1.
/// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();

  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // If we have a single element being moved from V1 to V2, this can be handled
  // using the C*[DX] compute mask instructions, but the vector elements have
  // to be monotonically increasing with one exception element.
  EVT VecVT = V1.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned EltsFromV2 = 0;
  unsigned V2EltIdx0 = 0;
  unsigned CurrElt = 0;
  unsigned MaxElts = VecVT.getVectorNumElements();
  unsigned PrevElt = 0;

  bool monotonic = true;

  // Determine the first mask index that refers to V2, by element type.
  // NOTE(review): the assignments to V2EltIdx0 inside these branches appear
  // to have been lost in extraction.
  if (EltVT == MVT::i8) {
  } else if (EltVT == MVT::i16) {
  } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
  } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
    llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");

  // Walk the shuffle mask, classifying it as single-insert, rotation, or
  // general (falls through to the SHUFB constant-mask path below).
  for (unsigned i = 0; i != MaxElts; ++i) {
    if (SVN->getMaskElt(i) < 0)

    unsigned SrcElt = SVN->getMaskElt(i);

    if (SrcElt >= V2EltIdx0) {
      // Element comes from V2; record the byte offset of the (single) insert.
      if (1 >= (++EltsFromV2)) {
        V2Elt = (V2EltIdx0 - SrcElt) << 2;
    } else if (CurrElt != SrcElt) {

    // Rotation detection: each element follows its predecessor (with
    // wrap-around allowed).
    if (PrevElt > 0 && SrcElt < MaxElts) {
      if ((PrevElt == SrcElt - 1)
          || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
      } else if (PrevElt == 0) {
        // First time through, need to keep track of previous element
        // This isn't a rotation, takes elements from vector 2

  if (EltsFromV2 == 1 && monotonic) {
    // Compute mask and shuffle
    MachineFunction &MF = DAG.getMachineFunction();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    // Initialize temporary register to 0
    SDValue InitTempReg =
      DAG.getCopyToReg(DAG.getEntryNode(), dl, VReg, DAG.getConstant(0, PtrVT));
    // Copy register's contents as index in SHUFFLE_MASK:
    SDValue ShufMaskOp =
      DAG.getNode(SPUISD::SHUFFLE_MASK, dl, MVT::v4i32,
                  DAG.getTargetConstant(V2Elt, MVT::i32),
                  DAG.getCopyFromReg(InitTempReg, dl, VReg, PtrVT));
    // Use shuffle mask in SHUFB synthetic instruction:
    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
  } else if (rotate) {
    // Rotation: lower to a byte rotate of V1.
    int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;

    return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
                       V1, DAG.getConstant(rotamt, MVT::i16));
    // General case: build a byte-level permute mask for SHUFB.
    // Convert the SHUFFLE_VECTOR mask's input element units to the
    unsigned BytesPerElement = EltVT.getSizeInBits()/8;

    SmallVector<SDValue, 16> ResultMask;
    for (unsigned i = 0, e = MaxElts; i != e; ++i) {
      unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);

      for (unsigned j = 0; j < BytesPerElement; ++j)
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));

    SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
                                    &ResultMask[0], ResultMask.size());
    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
// Lower SCALAR_TO_VECTOR: constants become an explicit constant BUILD_VECTOR
// (which later simplifies to a vector register load); non-constants are
// promoted into the preferred slot with PREFSLOT2VEC.
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
  DebugLoc dl = Op.getDebugLoc();

  if (Op0.getNode()->getOpcode() == ISD::Constant) {
    // For a constant, build the appropriate constant vector, which will
    // eventually simplify to a vector register load.

    ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
    SmallVector<SDValue, 16> ConstVecValues;

    // Create a constant vector:
    switch (Op.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected constant value type in "
                              "LowerSCALAR_TO_VECTOR");
    case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
    case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
    case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
    case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
    case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
    case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;

    // Replicate the scalar constant across every lane.
    SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
    for (size_t j = 0; j < n_copies; ++j)
      ConstVecValues.push_back(CValue);

    return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
                       &ConstVecValues[0], ConstVecValues.size());
    // Otherwise, copy the value from one register to another:
    // NOTE(review): the case labels of this switch (scalar MVTs that share
    // the PREFSLOT2VEC lowering) appear to have been lost in extraction.
    switch (Op0.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
      return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
1888 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1889 EVT VT = Op.getValueType();
1890 SDValue N = Op.getOperand(0);
1891 SDValue Elt = Op.getOperand(1);
1892 DebugLoc dl = Op.getDebugLoc();
1895 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
1896 // Constant argument:
1897 int EltNo = (int) C->getZExtValue();
1900 if (VT == MVT::i8 && EltNo >= 16)
1901 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
1902 else if (VT == MVT::i16 && EltNo >= 8)
1903 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
1904 else if (VT == MVT::i32 && EltNo >= 4)
1905 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
1906 else if (VT == MVT::i64 && EltNo >= 2)
1907 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
1909 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
1910 // i32 and i64: Element 0 is the preferred slot
1911 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
1914 // Need to generate shuffle mask and extract:
1915 int prefslot_begin = -1, prefslot_end = -1;
1916 int elt_byte = EltNo * VT.getSizeInBits() / 8;
1918 switch (VT.getSimpleVT().SimpleTy) {
1920 assert(false && "Invalid value type!");
1922 prefslot_begin = prefslot_end = 3;
1926 prefslot_begin = 2; prefslot_end = 3;
1931 prefslot_begin = 0; prefslot_end = 3;
1936 prefslot_begin = 0; prefslot_end = 7;
1941 assert(prefslot_begin != -1 && prefslot_end != -1 &&
1942 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
1944 unsigned int ShufBytes[16] = {
1945 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1947 for (int i = 0; i < 16; ++i) {
1948 // zero fill uppper part of preferred slot, don't care about the
1950 unsigned int mask_val;
1951 if (i <= prefslot_end) {
1953 ((i < prefslot_begin)
1955 : elt_byte + (i - prefslot_begin));
1957 ShufBytes[i] = mask_val;
1959 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
1962 SDValue ShufMask[4];
1963 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
1964 unsigned bidx = i * 4;
1965 unsigned int bits = ((ShufBytes[bidx] << 24) |
1966 (ShufBytes[bidx+1] << 16) |
1967 (ShufBytes[bidx+2] << 8) |
1969 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
1972 SDValue ShufMaskVec =
1973 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1974 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
1976 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1977 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
1978 N, N, ShufMaskVec));
1980 // Variable index: Rotate the requested element into slot 0, then replicate
1981 // slot 0 across the vector
1982 EVT VecVT = N.getValueType();
1983 if (!VecVT.isSimple() || !VecVT.isVector() || !VecVT.is128BitVector()) {
1984 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
1988 // Make life easier by making sure the index is zero-extended to i32
1989 if (Elt.getValueType() != MVT::i32)
1990 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
1992 // Scale the index to a bit/byte shift quantity
1994 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
1995 unsigned scaleShift = scaleFactor.logBase2();
1998 if (scaleShift > 0) {
1999 // Scale the shift factor:
2000 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2001 DAG.getConstant(scaleShift, MVT::i32));
2004 vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
2006 // Replicate the bytes starting at byte 0 across the entire vector (for
2007 // consistency with the notion of a unified register set)
2010 switch (VT.getSimpleVT().SimpleTy) {
2012 report_fatal_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
2016 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2017 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2018 factor, factor, factor, factor);
2022 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2023 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2024 factor, factor, factor, factor);
2029 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2030 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2031 factor, factor, factor, factor);
2036 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2037 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2038 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2039 loFactor, hiFactor, loFactor, hiFactor);
2044 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2045 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2046 vecShift, vecShift, replicate));
// Lower ISD::INSERT_VECTOR_ELT: build a SHUFFLE_MASK from a stack-relative
// address encoding the (constant) lane index, then SHUFB the scalar into the
// vector. An undef index is treated as lane 0.
2052 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2053 SDValue VecOp = Op.getOperand(0);
2054 SDValue ValOp = Op.getOperand(1);
2055 SDValue IdxOp = Op.getOperand(2);
2056 DebugLoc dl = Op.getDebugLoc();
2057 EVT VT = Op.getValueType();
2059 // use 0 when the lane to insert to is 'undef'
2061 if (IdxOp.getOpcode() != ISD::UNDEF) {
2062 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2063 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2064 Idx = (CN->getSExtValue());
2067 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2068 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2069 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2070 DAG.getRegister(SPU::R1, PtrVT),
2071 DAG.getConstant(Idx, PtrVT));
// SHUFFLE_MASK materializes the insertion control word from the address.
2072 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, VT, Pointer);
2075 DAG.getNode(SPUISD::SHUFB, dl, VT,
2076 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2078 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
// Lower i8 arithmetic for the SPU, which has no native 8-bit ALU ops:
// promote the operands to i16, perform the operation at i16 (opcode Opc),
// and truncate the result back to i8. Shift amounts are coerced to the
// target's shift-amount type; shifts/rotates differ in how they extend N0/N1.
2083 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2084 const TargetLowering &TLI)
2086 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2087 DebugLoc dl = Op.getDebugLoc();
2088 EVT ShiftVT = TLI.getShiftAmountTy();
2090 assert(Op.getValueType() == MVT::i8);
2093 llvm_unreachable("Unhandled i8 math operator");
2097 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2099 SDValue N1 = Op.getOperand(1);
2100 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2101 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2102 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2103 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2108 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2110 SDValue N1 = Op.getOperand(1);
2111 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2112 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2113 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2114 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Rotate case (presumably ROTL — case label elided): widen the value and
// coerce the rotate amount to ShiftVT.
2118 SDValue N1 = Op.getOperand(1);
2119 EVT N1VT = N1.getValueType();
2121 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2122 if (!N1VT.bitsEq(ShiftVT)) {
2123 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2126 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2129 // Replicate lower 8-bits into upper 8:
// Duplicating the byte makes an i16 rotate behave like an i8 rotate.
2131 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2132 DAG.getNode(ISD::SHL, dl, MVT::i16,
2133 N0, DAG.getConstant(8, MVT::i32)));
2135 // Truncate back down to i8
2136 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2137 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
// Logical-shift case: zero-extend the value; truncate/zero-extend the
// shift amount to ShiftVT as needed.
2141 SDValue N1 = Op.getOperand(1);
2142 EVT N1VT = N1.getValueType();
2144 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2145 if (!N1VT.bitsEq(ShiftVT)) {
2146 unsigned N1Opc = ISD::ZERO_EXTEND;
2148 if (N1.getValueType().bitsGT(ShiftVT))
2149 N1Opc = ISD::TRUNCATE;
2151 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2154 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2155 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Arithmetic-shift case: sign-extend the value instead.
2158 SDValue N1 = Op.getOperand(1);
2159 EVT N1VT = N1.getValueType();
2161 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2162 if (!N1VT.bitsEq(ShiftVT)) {
2163 unsigned N1Opc = ISD::SIGN_EXTEND;
2165 if (N1VT.bitsGT(ShiftVT))
2166 N1Opc = ISD::TRUNCATE;
2167 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2170 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2171 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Multiply (or similar two-operand) case: sign-extend both operands.
2174 SDValue N1 = Op.getOperand(1);
2176 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2177 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2178 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2179 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2187 //! Lower byte immediate operations for v16i8 vectors:
// If one operand of a v16i8 AND/OR/XOR is a constant splat that fits in a
// byte, rebuild the operation with a 16 x i8 target-constant vector so the
// byte-immediate instruction forms (ANDBI/ORBI/XORBI) can be selected.
// Looks through a BIT_CONVERT on either operand to find the BUILD_VECTOR.
2189 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2192 EVT VT = Op.getValueType();
2193 DebugLoc dl = Op.getDebugLoc();
2195 ConstVec = Op.getOperand(0);
2196 Arg = Op.getOperand(1);
2197 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2198 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2199 ConstVec = ConstVec.getOperand(0);
// Constant wasn't operand 0; try the operands swapped.
2201 ConstVec = Op.getOperand(1);
2202 Arg = Op.getOperand(0);
2203 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2204 ConstVec = ConstVec.getOperand(0);
2209 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2210 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2211 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2213 APInt APSplatBits, APSplatUndef;
2214 unsigned SplatBitSize;
2216 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2218 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2219 HasAnyUndefs, minSplatBits)
2220 && minSplatBits <= SplatBitSize) {
2221 uint64_t SplatBits = APSplatBits.getZExtValue();
// Only the low byte is used: the immediate forms take an 8-bit value.
2222 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2224 SmallVector<SDValue, 16> tcVec;
2225 tcVec.assign(16, tc);
2226 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2227 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2231 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2232 // lowered. Return the operation, rather than a null SDValue.
2236 //! Custom lowering for CTPOP (count population)
2238 Custom lowering code that counts the number ones in the input
2239 operand. SPU has such an instruction, but it counts the number of
2240 ones per byte, which then have to be accumulated.
// Strategy: splat the scalar into a vector (PREFSLOT2VEC), run the per-byte
// CNTB instruction, extract the preferred slot, then fold the per-byte
// counts together with shifts/adds sized to the operand width.
2242 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2243 EVT VT = Op.getValueType();
// vecVT is the 128-bit vector type with VT-sized elements.
2244 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2245 VT, (128 / VT.getSizeInBits()));
2246 DebugLoc dl = Op.getDebugLoc();
2248 switch (VT.getSimpleVT().SimpleTy) {
2250 assert(false && "Invalid value type!");
// i8 case: CNTB's per-byte count is already the full answer.
2252 SDValue N = Op.getOperand(0);
2253 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2255 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2256 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2258 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
// i16 case: add the two byte counts (hi byte shifted down) and mask to 4 bits.
2262 MachineFunction &MF = DAG.getMachineFunction();
2263 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2265 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2267 SDValue N = Op.getOperand(0);
2268 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2269 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2270 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2272 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2273 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2275 // CNTB_result becomes the chain to which all of the virtual registers
2276 // CNTB_reg, SUM1_reg become associated:
2277 SDValue CNTB_result =
2278 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2280 SDValue CNTB_rescopy =
2281 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2283 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2285 return DAG.getNode(ISD::AND, dl, MVT::i16,
2286 DAG.getNode(ISD::ADD, dl, MVT::i16,
2287 DAG.getNode(ISD::SRL, dl, MVT::i16,
// i32 case: two shift/add reduction steps (16 then 8), masked to 8 bits.
2294 MachineFunction &MF = DAG.getMachineFunction();
2295 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2297 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2298 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2300 SDValue N = Op.getOperand(0);
2301 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2302 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2303 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2304 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2306 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2307 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2309 // CNTB_result becomes the chain to which all of the virtual registers
2310 // CNTB_reg, SUM1_reg become associated:
2311 SDValue CNTB_result =
2312 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2314 SDValue CNTB_rescopy =
2315 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2318 DAG.getNode(ISD::SRL, dl, MVT::i32,
2319 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2323 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2324 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2326 SDValue Sum1_rescopy =
2327 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2330 DAG.getNode(ISD::SRL, dl, MVT::i32,
2331 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2334 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2335 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2337 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2347 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2349 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2350 All conversions to i64 are expanded to a libcall.
2352 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2353 const SPUTargetLowering &TLI) {
2354 EVT OpVT = Op.getValueType();
2355 SDValue Op0 = Op.getOperand(0);
2356 EVT Op0VT = Op0.getValueType();
2358 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2359 || OpVT == MVT::i64) {
2360 // Convert f32 / f64 to i32 / i64 via libcall.
// Pick the matching runtime-library routine for the conversion direction.
2362 (Op.getOpcode() == ISD::FP_TO_SINT)
2363 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2364 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2365 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2367 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2373 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2375 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2376 All conversions from i64 are expanded to a libcall.
2378 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2379 const SPUTargetLowering &TLI) {
2380 EVT OpVT = Op.getValueType();
2381 SDValue Op0 = Op.getOperand(0);
2382 EVT Op0VT = Op0.getValueType();
2384 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2385 || Op0VT == MVT::i64) {
2386 // Convert i32, i64 to f64 via libcall:
// Pick the matching runtime-library routine for the conversion direction.
2388 (Op.getOpcode() == ISD::SINT_TO_FP)
2389 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2390 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2391 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd int-to-fp conversion!");
2393 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2399 //! Lower ISD::SETCC
2401 This handles MVT::f64 (double floating point) condition lowering
// Implements f64 comparisons via integer arithmetic: the doubles are
// bitcast to i64, converted from sign-magnitude to two's-complement, and
// then compared with the corresponding integer condition. Ordered
// comparisons additionally AND in "neither operand is NaN".
2403 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2404 const TargetLowering &TLI) {
2405 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2406 DebugLoc dl = Op.getDebugLoc();
2407 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2409 SDValue lhs = Op.getOperand(0);
2410 SDValue rhs = Op.getOperand(1);
2411 EVT lhsVT = lhs.getValueType();
2412 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
2414 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2415 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2416 EVT IntVT(MVT::i64);
2418 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2419 // selected to a NOP:
2420 SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
2422 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2423 DAG.getNode(ISD::SRL, dl, IntVT,
2424 i64lhs, DAG.getConstant(32, MVT::i32)));
// High word with the sign bit cleared (magnitude), used for NaN tests.
2425 SDValue lhsHi32abs =
2426 DAG.getNode(ISD::AND, dl, MVT::i32,
2427 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2429 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2431 // SETO and SETUO only use the lhs operand:
2432 if (CC->get() == ISD::SETO) {
2433 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2435 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2436 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2437 DAG.getSetCC(dl, ccResultVT,
2438 lhs, DAG.getConstantFP(0.0, lhsVT),
2440 DAG.getConstant(ccResultAllOnes, ccResultVT));
2441 } else if (CC->get() == ISD::SETUO) {
2442 // Evaluates to true if Op0 is [SQ]NaN
// NaN <=> exponent bits all set (hi >= 0x7ff00000) with nonzero mantissa;
// the elided operands compare lhsHi32abs and the low word against these.
2443 return DAG.getNode(ISD::AND, dl, ccResultVT,
2444 DAG.getSetCC(dl, ccResultVT,
2446 DAG.getConstant(0x7ff00000, MVT::i32),
2448 DAG.getSetCC(dl, ccResultVT,
2450 DAG.getConstant(0, MVT::i32),
2454 SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
2456 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2457 DAG.getNode(ISD::SRL, dl, IntVT,
2458 i64rhs, DAG.getConstant(32, MVT::i32)));
2460 // If a value is negative, subtract from the sign magnitude constant:
2461 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2463 // Convert the sign-magnitude representation into 2's complement:
// The select mask is the sign bit broadcast (arithmetic shift by 31).
2464 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2465 lhsHi32, DAG.getConstant(31, MVT::i32));
2466 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2468 DAG.getNode(ISD::SELECT, dl, IntVT,
2469 lhsSelectMask, lhsSignMag2TC, i64lhs);
2471 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2472 rhsHi32, DAG.getConstant(31, MVT::i32));
2473 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2475 DAG.getNode(ISD::SELECT, dl, IntVT,
2476 rhsSelectMask, rhsSignMag2TC, i64rhs);
// Map the FP condition onto the equivalent signed-integer condition.
2480 switch (CC->get()) {
2483 compareOp = ISD::SETEQ; break;
2486 compareOp = ISD::SETGT; break;
2489 compareOp = ISD::SETGE; break;
2492 compareOp = ISD::SETLT; break;
2495 compareOp = ISD::SETLE; break;
2498 compareOp = ISD::SETNE; break;
2500 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2504 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2505 (ISD::CondCode) compareOp);
// Low bit 0x8 clear in the CondCode => ordered comparison variant.
2507 if ((CC->get() & 0x8) == 0) {
2508 // Ordered comparison:
2509 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2510 lhs, DAG.getConstantFP(0.0, MVT::f64),
2512 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2513 rhs, DAG.getConstantFP(0.0, MVT::f64),
2515 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2517 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2523 //! Lower ISD::SELECT_CC
2525 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2528 \note Need to revisit this in the future: if the code path through the true
2529 and false value computations is longer than the latency of a branch (6
2530 cycles), then it would be more advantageous to branch and insert a new basic
2531 block and branch on the condition. However, this code does not make that
2532 assumption, given the simplistic uses so far.
2535 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2536 const TargetLowering &TLI) {
2537 EVT VT = Op.getValueType();
2538 SDValue lhs = Op.getOperand(0);
2539 SDValue rhs = Op.getOperand(1);
2540 SDValue trueval = Op.getOperand(2);
2541 SDValue falseval = Op.getOperand(3);
2542 SDValue condition = Op.getOperand(4);
2543 DebugLoc dl = Op.getDebugLoc();
2545 // NOTE: SELB's arguments: $rA, $rB, $mask
2547 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2548 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2549 // condition was true and 0s where the condition was false. Hence, the
2550 // arguments to SELB get reversed.
2552 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2553 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2554 // with another "cannot select select_cc" assert:
2556 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2557 TLI.getSetCCResultType(Op.getValueType()),
2558 lhs, rhs, condition);
2559 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2562 //! Custom lower ISD::TRUNCATE
// Only the i128 -> i64 case is handled: a SHUFB selects the least
// significant doubleword of the quadword; all other truncates are left
// for the default expansion.
2563 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2565 // Type to truncate to
2566 EVT VT = Op.getValueType();
2567 MVT simpleVT = VT.getSimpleVT();
2568 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2569 VT, (128 / VT.getSizeInBits()));
2570 DebugLoc dl = Op.getDebugLoc();
2572 // Type to truncate from
2573 SDValue Op0 = Op.getOperand(0);
2574 EVT Op0VT = Op0.getValueType();
2576 if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
2577 // Create shuffle mask, least significant doubleword of quadword
2578 unsigned maskHigh = 0x08090a0b;
2579 unsigned maskLow = 0x0c0d0e0f;
2580 // Use a shuffle to perform the truncation
2581 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2582 DAG.getConstant(maskHigh, MVT::i32),
2583 DAG.getConstant(maskLow, MVT::i32),
2584 DAG.getConstant(maskHigh, MVT::i32),
2585 DAG.getConstant(maskLow, MVT::i32));
2587 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2588 Op0, Op0, shufMask);
2590 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2593 return SDValue(); // Leave the truncate unmolested
2597 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2598 * algorithm is to duplicate the sign bit using rotmai to generate at
2599 * least one byte full of sign bits. Then propagate the "sign-byte" into
2600 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2602 * @param Op The sext operand
2603 * @param DAG The current DAG
2604 * @return The SDValue with the entire instruction sequence
2606 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2608 DebugLoc dl = Op.getDebugLoc();
2610 // Type to extend to
2611 MVT OpVT = Op.getValueType().getSimpleVT();
2613 // Type to extend from
2614 SDValue Op0 = Op.getOperand(0);
2615 MVT Op0VT = Op0.getValueType().getSimpleVT();
2617 // The type to extend to needs to be a i128 and
2618 // the type to extend from needs to be i64 or i32.
2619 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2620 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2622 // Create shuffle mask
// Byte index 0x10 selects from the second SHUFB operand (the sign words);
// the masks pick sign bytes for the high part and the source for the low.
2623 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2624 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2625 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2626 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2627 DAG.getConstant(mask1, MVT::i32),
2628 DAG.getConstant(mask1, MVT::i32),
2629 DAG.getConstant(mask2, MVT::i32),
2630 DAG.getConstant(mask3, MVT::i32));
2632 // Word wise arithmetic right shift to generate at least one byte
2633 // that contains sign bits.
2634 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2635 SDValue sraVal = DAG.getNode(ISD::SRA,
2638 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2639 DAG.getConstant(31, MVT::i32));
2641 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2642 // and the input value into the lower 64 bits.
2643 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2644 DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i128, Op0), sraVal, shufMask);
2646 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
2649 //! Custom (target-specific) lowering entry point
2651 This is where LLVM's DAG selection process calls to do target-specific
// Dispatches each custom-lowered opcode to its Lower* helper; unhandled
// opcodes dump diagnostics and abort (llvm_unreachable).
2655 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2657 unsigned Opc = (unsigned) Op.getOpcode();
2658 EVT VT = Op.getValueType();
// default case: no lowering registered for this opcode — hard error.
2663 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2664 errs() << "Op.getOpcode() = " << Opc << "\n";
2665 errs() << "*Op.getNode():\n";
2666 Op.getNode()->dump();
2668 llvm_unreachable(0);
2674 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2676 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2677 case ISD::ConstantPool:
2678 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2679 case ISD::GlobalAddress:
2680 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2681 case ISD::JumpTable:
2682 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2683 case ISD::ConstantFP:
2684 return LowerConstantFP(Op, DAG);
2686 // i8, i64 math ops:
2695 return LowerI8Math(Op, DAG, Opc, *this);
2699 case ISD::FP_TO_SINT:
2700 case ISD::FP_TO_UINT:
2701 return LowerFP_TO_INT(Op, DAG, *this);
2703 case ISD::SINT_TO_FP:
2704 case ISD::UINT_TO_FP:
2705 return LowerINT_TO_FP(Op, DAG, *this);
2707 // Vector-related lowering.
2708 case ISD::BUILD_VECTOR:
2709 return LowerBUILD_VECTOR(Op, DAG);
2710 case ISD::SCALAR_TO_VECTOR:
2711 return LowerSCALAR_TO_VECTOR(Op, DAG);
2712 case ISD::VECTOR_SHUFFLE:
2713 return LowerVECTOR_SHUFFLE(Op, DAG);
2714 case ISD::EXTRACT_VECTOR_ELT:
2715 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2716 case ISD::INSERT_VECTOR_ELT:
2717 return LowerINSERT_VECTOR_ELT(Op, DAG);
2719 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2723 return LowerByteImmed(Op, DAG);
2725 // Vector and i8 multiply:
2728 return LowerI8Math(Op, DAG, Opc, *this);
2731 return LowerCTPOP(Op, DAG);
2733 case ISD::SELECT_CC:
2734 return LowerSELECT_CC(Op, DAG, *this);
2737 return LowerSETCC(Op, DAG, *this);
2740 return LowerTRUNCATE(Op, DAG);
2742 case ISD::SIGN_EXTEND:
2743 return LowerSIGN_EXTEND(Op, DAG);
// Custom result-type legalization hook. The visible default path only
// prints diagnostics for unexpected opcodes; results are otherwise
// returned unchanged (nothing is appended to Results here).
2749 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2750 SmallVectorImpl<SDValue>&Results,
2751 SelectionDAG &DAG) const
2754 unsigned Opc = (unsigned) N->getOpcode();
2755 EVT OpVT = N->getValueType(0);
2759 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2760 errs() << "Op.getOpcode() = " << Opc << "\n";
2761 errs() << "*Op.getNode():\n";
2769 /* Otherwise, return unchanged */
2772 //===----------------------------------------------------------------------===//
2773 // Target Optimization Hooks
2774 //===----------------------------------------------------------------------===//
// Target-specific DAG combines for the SPU:
//  - fold adds into SPUindirect address nodes (constant folding of offsets),
//  - drop redundant extends of VEC2PREFSLOT results,
//  - simplify degenerate SPUindirect and zero-amount quadword shifts,
//  - collapse PREFSLOT2VEC(VEC2PREFSLOT(x)) round trips.
// Returns an empty SDValue when no combine applies.
2777 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2780 TargetMachine &TM = getTargetMachine();
2782 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2783 SelectionDAG &DAG = DCI.DAG;
2784 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2785 EVT NodeVT = N->getValueType(0); // The node's value type
2786 EVT Op0VT = Op0.getValueType(); // The first operand's result
2787 SDValue Result; // Initially, empty result
2788 DebugLoc dl = N->getDebugLoc();
2790 switch (N->getOpcode()) {
// ISD::ADD combine (case label elided in this listing):
2793 SDValue Op1 = N->getOperand(1);
2795 if (Op0.getOpcode() == SPUISD::IndirectAddr
2796 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2797 // Normalize the operands to reduce repeated code
2798 SDValue IndirectArg = Op0, AddArg = Op1;
2800 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2805 if (isa<ConstantSDNode>(AddArg)) {
2806 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2807 SDValue IndOp1 = IndirectArg.getOperand(1);
2809 if (CN0->isNullValue()) {
2810 // (add (SPUindirect <arg>, <arg>), 0) ->
2811 // (SPUindirect <arg>, <arg>)
2813 #if !defined(NDEBUG)
2814 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2816 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2817 << "With: (SPUindirect <arg>, <arg>)\n";
2822 } else if (isa<ConstantSDNode>(IndOp1)) {
2823 // (add (SPUindirect <arg>, <const>), <const>) ->
2824 // (SPUindirect <arg>, <const + const>)
2825 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2826 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2827 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2829 #if !defined(NDEBUG)
2830 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2832 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2833 << "), " << CN0->getSExtValue() << ")\n"
2834 << "With: (SPUindirect <arg>, "
2835 << combinedConst << ")\n";
2839 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2840 IndirectArg, combinedValue);
2846 case ISD::SIGN_EXTEND:
2847 case ISD::ZERO_EXTEND:
2848 case ISD::ANY_EXTEND: {
2849 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2850 // (any_extend (SPUextract_elt0 <arg>)) ->
2851 // (SPUextract_elt0 <arg>)
2852 // Types must match, however...
2853 #if !defined(NDEBUG)
2854 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2855 errs() << "\nReplace: ";
2857 errs() << "\nWith: ";
2858 Op0.getNode()->dump(&DAG);
2867 case SPUISD::IndirectAddr: {
2868 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2869 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2870 if (CN != 0 && CN->getZExtValue() == 0) {
2871 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2872 // (SPUaform <addr>, 0)
2874 DEBUG(errs() << "Replace: ");
2875 DEBUG(N->dump(&DAG));
2876 DEBUG(errs() << "\nWith: ");
2877 DEBUG(Op0.getNode()->dump(&DAG));
2878 DEBUG(errs() << "\n");
2882 } else if (Op0.getOpcode() == ISD::ADD) {
2883 SDValue Op1 = N->getOperand(1);
2884 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
2885 // (SPUindirect (add <arg>, <arg>), 0) ->
2886 // (SPUindirect <arg>, <arg>)
2887 if (CN1->isNullValue()) {
2889 #if !defined(NDEBUG)
2890 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2892 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
2893 << "With: (SPUindirect <arg>, <arg>)\n";
2897 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2898 Op0.getOperand(0), Op0.getOperand(1));
2904 case SPUISD::SHLQUAD_L_BITS:
2905 case SPUISD::SHLQUAD_L_BYTES:
2906 case SPUISD::ROTBYTES_LEFT: {
2907 SDValue Op1 = N->getOperand(1);
2909 // Kill degenerate vector shifts:
2910 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2911 if (CN->isNullValue()) {
2917 case SPUISD::PREFSLOT2VEC: {
2918 switch (Op0.getOpcode()) {
2921 case ISD::ANY_EXTEND:
2922 case ISD::ZERO_EXTEND:
2923 case ISD::SIGN_EXTEND: {
2924 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
2926 // but only if the SPUprefslot2vec and <arg> types match.
2927 SDValue Op00 = Op0.getOperand(0);
2928 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
2929 SDValue Op000 = Op00.getOperand(0);
2930 if (Op000.getValueType() == NodeVT) {
2936 case SPUISD::VEC2PREFSLOT: {
2937 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
2939 Result = Op0.getOperand(0);
2947 // Otherwise, return unchanged.
2949 if (Result.getNode()) {
2950 DEBUG(errs() << "\nReplace.SPU: ");
2951 DEBUG(N->dump(&DAG));
2952 DEBUG(errs() << "\nWith: ");
2953 DEBUG(Result.getNode()->dump(&DAG));
2954 DEBUG(errs() << "\n");
2961 //===----------------------------------------------------------------------===//
2962 // Inline Assembly Support
2963 //===----------------------------------------------------------------------===//
2965 /// getConstraintType - Given a constraint letter, return the type of
2966 /// constraint it is for this target.
// Single-letter register-class constraints (letters elided in this listing)
// map to C_RegisterClass; everything else defers to the base class.
2967 SPUTargetLowering::ConstraintType
2968 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
2969 if (ConstraintLetter.size() == 1) {
2970 switch (ConstraintLetter[0]) {
2977 return C_RegisterClass;
2980 return TargetLowering::getConstraintType(ConstraintLetter);
// Map single-letter inline-asm constraints to SPU register classes,
// choosing the class by the operand's value type (e.g. f32 vs f64).
2983 std::pair<unsigned, const TargetRegisterClass*>
2984 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2987 if (Constraint.size() == 1) {
2988 // GCC RS6000 Constraint Letters
2989 switch (Constraint[0]) {
2993 return std::make_pair(0U, SPU::R64CRegisterClass);
2994 return std::make_pair(0U, SPU::R32CRegisterClass);
2997 return std::make_pair(0U, SPU::R32FPRegisterClass);
2998 else if (VT == MVT::f64)
2999 return std::make_pair(0U, SPU::R64FPRegisterClass);
3002 return std::make_pair(0U, SPU::GPRCRegisterClass);
3006 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3009 //! Compute used/known bits for a SPU operand
// For the listed SPU-specific nodes no bits are reported as known
// (the visible cases fall through without setting KnownZero/KnownOne).
3011 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3015 const SelectionDAG &DAG,
3016 unsigned Depth ) const {
3018 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3020 switch (Op.getOpcode()) {
3022 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3028 case SPUISD::PREFSLOT2VEC:
3029 case SPUISD::LDRESULT:
3030 case SPUISD::VEC2PREFSLOT:
3031 case SPUISD::SHLQUAD_L_BITS:
3032 case SPUISD::SHLQUAD_L_BYTES:
3033 case SPUISD::VEC_ROTL:
3034 case SPUISD::VEC_ROTR:
3035 case SPUISD::ROTBYTES_LEFT:
3036 case SPUISD::SELECT_MASK:
// Report sign-bit knowledge for SPU-specific nodes. For the visible case
// (presumably SPUISD::SETCC_* — label elided), a non-i8/i16/i32 result
// reports the full bit width as sign bits (all bits equal).
3043 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3044 unsigned Depth) const {
3045 switch (Op.getOpcode()) {
3050 EVT VT = Op.getValueType();
3052 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3055 return VT.getSizeInBits();
3060 // LowerAsmOperandForConstraint
// No SPU-specific constraint lowering yet; simply forwards to the
// TargetLowering default implementation.
3062 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3063 char ConstraintLetter,
3065 std::vector<SDValue> &Ops,
3066 SelectionDAG &DAG) const {
3067 // Default, for the time being, to the base class handler
3068 TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, hasMemory,
3072 /// isLegalAddressImmediate - Return true if the integer value can be used
3073 /// as the offset of the target addressing mode.
// Accepts signed 18-bit offsets: the open interval (-2^18, 2^18 - 1),
// matching the SPU's 256K local-store addressing range.
3074 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3075 const Type *Ty) const {
3076 // SPU's addresses are 256K:
3077 return (V > -(1 << 18) && V < (1 << 18) - 1);
3080 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3085 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3086 // The SPU target isn't yet aware of offsets.