//===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SPUTargetLowering class.
//
//===----------------------------------------------------------------------===//
13 #include "SPUISelLowering.h"
14 #include "SPUTargetMachine.h"
15 #include "SPUFrameLowering.h"
16 #include "SPUMachineFunction.h"
17 #include "llvm/Constants.h"
18 #include "llvm/Function.h"
19 #include "llvm/Intrinsics.h"
20 #include "llvm/CallingConv.h"
21 #include "llvm/Type.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/ADT/VectorExtras.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Support/raw_ostream.h"
// Used in getTargetNodeName() below.
// Maps SPUISD opcode -> printable name; lazily populated on first call.
// NOTE(review): mutable file-scope map filled with no synchronization —
// not thread-safe if lowering ever runs concurrently; confirm single-threaded
// use or guard the initialization.
std::map<unsigned, const char *> node_names;
43 // Byte offset of the preferred slot (counted from the MSB)
44 int prefslotOffset(EVT VT) {
46 if (VT==MVT::i1) retval=3;
47 if (VT==MVT::i8) retval=3;
48 if (VT==MVT::i16) retval=2;
53 //! Expand a library call into an actual call DAG node
56 This code is taken from SelectionDAGLegalize, since it is not exposed as
57 part of the LLVM SelectionDAG API.
61 ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
62 bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
63 // The input chain to this libcall is the entry node of the function.
64 // Legalizing the call will automatically add the previous call to the
66 SDValue InChain = DAG.getEntryNode();
68 TargetLowering::ArgListTy Args;
69 TargetLowering::ArgListEntry Entry;
70 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
71 EVT ArgVT = Op.getOperand(i).getValueType();
72 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
73 Entry.Node = Op.getOperand(i);
75 Entry.isSExt = isSigned;
76 Entry.isZExt = !isSigned;
77 Args.push_back(Entry);
79 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
82 // Splice the libcall in wherever FindInputOutputChains tells us to.
84 Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
85 std::pair<SDValue, SDValue> CallInfo =
86 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
87 0, TLI.getLibcallCallingConv(LC), false,
88 /*isReturnValueUsed=*/true,
89 Callee, Args, DAG, Op.getDebugLoc());
91 return CallInfo.first;
//! Construct the Cell SPU target lowering.
//!
//! Registers the SPU's legal register classes and declares, for each
//! (operation, type) pair, whether it is Legal, Custom-lowered, Expanded
//! or Promoted on this target, then sets libcall overrides, boolean
//! contents, DAG-combine opt-ins and the scheduling preference.
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);

  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  // No truncating stores from i128 down to the narrower integer types.
  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  // SPU's loads and stores have to be custom lowered:
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setLoadExtAction(ISD::EXTLOAD, VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    // Every truncating store from VT to a narrower integer type is expanded.
    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);

  // Same treatment for the floating-point types.
  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);

  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);

  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i128, Expand);
  setOperationAction(ISD::UREM, MVT::i128, Expand);
  setOperationAction(ISD::SDIV, MVT::i128, Expand);
  setOperationAction(ISD::UDIV, MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8, Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);

  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);

  // Custom lower i8, i32 and i64 multiplications
  setOperationAction(ISD::MUL, MVT::i8, Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);

  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD, MVT::i8, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);

  // SPU does not have BSWAP. It does have i32 support CTLZ.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Expand);

  setOperationAction(ISD::CTTZ , MVT::i8, Expand);
  setOperationAction(ISD::CTTZ , MVT::i16, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i128, Expand);

  setOperationAction(ISD::CTLZ , MVT::i8, Promote);
  setOperationAction(ISD::CTLZ , MVT::i16, Promote);
  setOperationAction(ISD::CTLZ , MVT::i32, Legal);
  setOperationAction(ISD::CTLZ , MVT::i64, Expand);
  setOperationAction(ISD::CTLZ , MVT::i128, Expand);

  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8, Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);

  setOperationAction(ISD::SETCC, MVT::i8, Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);

  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);

  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall

  // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BITCAST, MVT::i32, Legal);
  setOperationAction(ISD::BITCAST, MVT::f32, Legal);
  setOperationAction(ISD::BITCAST, MVT::i64, Legal);
  setOperationAction(ISD::BITCAST, MVT::f64, Legal);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);

  // Cell SPU has instructions for converting between i64 and fp.
  // NOTE(review): these override the FP_TO_SINT/i64 Expand set earlier in
  // this constructor — presumably the later call wins; confirm intended.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  // NOTE(review): this also overrides the earlier Custom action for
  // FP_TO_UINT/i32 — confirm intended.
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);

  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

    // Set operation actions to legal types only.
    if (!isTypeLegal(VT)) continue;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul has to be custom lowered.
    // NOTE(review): the action below is Legal, which contradicts the comment
    // above — confirm which is intended.
    setOperationAction(ISD::MUL, VT, Legal);

    setOperationAction(ISD::AND, VT, Legal);
    setOperationAction(ISD::OR, VT, Legal);
    setOperationAction(ISD::XOR, VT, Legal);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE, VT, Custom);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Expand all trunc stores
    for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
      MVT::SimpleValueType TargetVT = (MVT::SimpleValueType)j;
      setTruncStoreAction(VT, TargetVT, Expand);

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);

  setOperationAction(ISD::SHL, MVT::v2i64, Expand);

  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR, MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);

  // SETCC/SELECT produce all-ones / all-zeros masks, scalar and vector.
  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // FIXME: Is this correct?

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  // Minimum function alignment (log2 — presumably 8 bytes; confirm units).
  setMinFunctionAlignment(3);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
475 SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
477 if (node_names.empty()) {
478 node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
479 node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
480 node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
481 node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
482 node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
483 node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
484 node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
485 node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
486 node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
487 node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
488 node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
489 node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
490 node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
491 node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
492 node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
493 node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
494 node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
495 node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
496 node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
497 "SPUISD::ROTBYTES_LEFT_BITS";
498 node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
499 node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
500 node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
501 node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
502 node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
505 std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
507 return ((i != node_names.end()) ? i->second : 0);
510 //===----------------------------------------------------------------------===//
511 // Return the Cell SPU's SETCC result type
512 //===----------------------------------------------------------------------===//
514 EVT SPUTargetLowering::getSetCCResultType(EVT VT) const {
515 // i8, i16 and i32 are valid SETCC result types
516 MVT::SimpleValueType retval;
518 switch(VT.getSimpleVT().SimpleTy){
521 retval = MVT::i8; break;
523 retval = MVT::i16; break;
531 //===----------------------------------------------------------------------===//
532 // Calling convention code:
533 //===----------------------------------------------------------------------===//
535 #include "SPUGenCallingConv.inc"
537 //===----------------------------------------------------------------------===//
538 // LowerOperation implementation
539 //===----------------------------------------------------------------------===//
/// Custom lower loads for CellSPU
/*!
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to rotate to extract the requested element.

 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:

 \verbatim
 %2 v16i8,ch = rotate %1
 %3 v4f8, ch = bitconvert %2
 %4 f32 = vec2perfslot %3
 %5 f64 = fp_extend %4
 \endverbatim
 */
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();          // type as it lives in memory
  EVT OutVT = Op.getValueType();         // type the DAG expects back
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  int pso = prefslotOffset(InVT);        // preferred-slot byte offset (from MSB)
  DebugLoc dl = Op.getDebugLoc();
  // Scalars are handled as full 128-bit vectors of InVT elements.
  EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
                                                  (128 / InVT.getSizeInBits()));

  assert( LN->getAddressingMode() == ISD::UNINDEXED
          && "we should get only UNINDEXED adresses");
  // clean aligned loads can be selected as-is
  if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)

  // Get pointerinfos to the memory chunk(s) that contain the data to load
  uint64_t mpi_offset = LN->getPointerInfo().Offset;
  mpi_offset -= mpi_offset%16;           // round down to the containing 16-byte chunk
  MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
  MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);

  SDValue basePtr = LN->getBasePtr();

  if ((alignment%16) == 0) {

    // Special cases for a known aligned load to simplify the base pointer
    // and the rotation amount:
    if (basePtr.getOpcode() == ISD::ADD
        && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
      // Known offset into basePtr
      int64_t offset = CN->getSExtValue();
      // Rotate left by (offset within quadword) minus the preferred slot.
      int64_t rotamt = int64_t((offset & 0xf) - pso);

      rotate = DAG.getConstant(rotamt, MVT::i16);

      // Simplify the base pointer for this case:
      basePtr = basePtr.getOperand(0);
      if ((offset & ~0xf) > 0) {
        // Fold the 16-byte-aligned part of the offset into the address.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              DAG.getConstant((offset & ~0xf), PtrVT));

    } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
               || (basePtr.getOpcode() == SPUISD::IndirectAddr
                   && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                   && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
      // Plain aligned a-form address: rotate into preferred slot
      // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
      int64_t rotamt = -pso;

      rotate = DAG.getConstant(rotamt, MVT::i16);

      // Offset the rotate amount by the basePtr and the preferred slot

      int64_t rotamt = -pso;

      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getConstant(rotamt, PtrVT));

    // Unaligned load: must be more pessimistic about addressing modes:
    if (basePtr.getOpcode() == ISD::ADD) {
      MachineFunction &MF = DAG.getMachineFunction();
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);

      SDValue Op0 = basePtr.getOperand(0);
      SDValue Op1 = basePtr.getOperand(1);

      if (isa<ConstantSDNode>(Op1)) {
        // Convert the (add <ptr>, <const>) to an indirect address contained
        // in a register. Note that this is done because we need to avoid
        // creating a 0(reg) d-form address due to the SPU's block loads.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
        basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);

        // Convert the (add <arg1>, <arg2>) to an indirect address, which
        // will likely be lowered as a reg(reg) x-form address.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);

      basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                            DAG.getConstant(0, PtrVT));

    // Offset the rotate amount by the basePtr and the preferred slot

    rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getConstant(-pso, PtrVT));

  // Do the load as a i128 to allow possible shifting
  SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
                            LN->isVolatile(), LN->isNonTemporal(), false, 16);

  // When the size is not greater than alignment we get all data with just
  // one load
  if (alignment >= InVT.getSizeInBits()/8) {

    the_chain = low.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
                         low.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BITCAST, dl, vecVT, result));

  // When alignment is less than the size, we might need (known only at
  // run-time) two loads
  // TODO: if the memory address is composed only from constants, we have
  // extra knowledge, and might avoid the second load

    // storage position offset from lower 16 byte aligned memory chunk
    SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
                                 basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
    // get a registerfull of ones. (this implementation is a workaround: LLVM
    // cannot handle 128 bit signed int constants)
    SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
    ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);

    // Second (possibly redundant) load of the next 16-byte chunk.
    SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
                               DAG.getNode(ISD::ADD, dl, PtrVT,
                                           DAG.getConstant(16, PtrVT)),
                               LN->isVolatile(), LN->isNonTemporal(), false,

    the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),

    // Shift the (possible) high part right to compensate the misalignment.
    // if there is no highpart (i.e. value is i64 and offset is 4), this
    // will zero out the high value.
    high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
                       DAG.getNode(ISD::SUB, dl, MVT::i32,
                                   DAG.getConstant( 16, MVT::i32),

    // Shift the low part left by the byte offset, similarly.
    low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );

    // Merge the two parts
    result = DAG.getNode(ISD::BITCAST, dl, vecVT,
                         DAG.getNode(ISD::OR, dl, MVT::i128, low, high));

    // Scalar loads: move the value out of the vector's preferred slot.
    if (!InVT.isVector()) {
      result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );

  // Handle extending loads by extending the scalar result:
  if (ExtType == ISD::SEXTLOAD) {
    result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
  } else if (ExtType == ISD::ZEXTLOAD) {
    result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
  } else if (ExtType == ISD::EXTLOAD) {
    unsigned NewOpc = ISD::ANY_EXTEND;

    if (OutVT.isFloatingPoint())
      NewOpc = ISD::FP_EXTEND;

    result = DAG.getNode(NewOpc, dl, OutVT, result);

  // Wrap value + chain in an LDRESULT node so both are returned together.
  SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
  SDValue retops[2] = {

  result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                       retops, sizeof(retops) / sizeof(retops[0]));
760 /// Custom lower stores for CellSPU
762 All CellSPU stores are aligned to 16-byte boundaries, so for elements
763 within a 16-byte block, we have to generate a shuffle to insert the
764 requested element into its place, then store the resulting block.
767 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
768 StoreSDNode *SN = cast<StoreSDNode>(Op);
769 SDValue Value = SN->getValue();
770 EVT VT = Value.getValueType();
771 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
772 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
773 DebugLoc dl = Op.getDebugLoc();
774 unsigned alignment = SN->getAlignment();
776 EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
777 (128 / StVT.getSizeInBits()));
778 // Get pointerinfos to the memory chunk(s) that contain the data to load
779 uint64_t mpi_offset = SN->getPointerInfo().Offset;
780 mpi_offset -= mpi_offset%16;
781 MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
782 MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);
786 assert( SN->getAddressingMode() == ISD::UNINDEXED
787 && "we should get only UNINDEXED adresses");
788 // clean aligned loads can be selected as-is
789 if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
792 SDValue alignLoadVec;
793 SDValue basePtr = SN->getBasePtr();
794 SDValue the_chain = SN->getChain();
795 SDValue insertEltOffs;
797 if ((alignment%16) == 0) {
799 // Special cases for a known aligned load to simplify the base pointer
800 // and insertion byte:
801 if (basePtr.getOpcode() == ISD::ADD
802 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
803 // Known offset into basePtr
804 int64_t offset = CN->getSExtValue();
806 // Simplify the base pointer for this case:
807 basePtr = basePtr.getOperand(0);
808 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
810 DAG.getConstant((offset & 0xf), PtrVT));
812 if ((offset & ~0xf) > 0) {
813 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
815 DAG.getConstant((offset & ~0xf), PtrVT));
818 // Otherwise, assume it's at byte 0 of basePtr
819 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
821 DAG.getConstant(0, PtrVT));
822 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
824 DAG.getConstant(0, PtrVT));
827 // Unaligned load: must be more pessimistic about addressing modes:
828 if (basePtr.getOpcode() == ISD::ADD) {
829 MachineFunction &MF = DAG.getMachineFunction();
830 MachineRegisterInfo &RegInfo = MF.getRegInfo();
831 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
834 SDValue Op0 = basePtr.getOperand(0);
835 SDValue Op1 = basePtr.getOperand(1);
837 if (isa<ConstantSDNode>(Op1)) {
838 // Convert the (add <ptr>, <const>) to an indirect address contained
839 // in a register. Note that this is done because we need to avoid
840 // creating a 0(reg) d-form address due to the SPU's block loads.
841 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
842 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
843 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
845 // Convert the (add <arg1>, <arg2>) to an indirect address, which
846 // will likely be lowered as a reg(reg) x-form address.
847 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
850 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
852 DAG.getConstant(0, PtrVT));
855 // Insertion point is solely determined by basePtr's contents
856 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
858 DAG.getConstant(0, PtrVT));
861 // Load the lower part of the memory to which to store.
862 SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
863 lowMemPtr, SN->isVolatile(), SN->isNonTemporal(),
866 // if we don't need to store over the 16 byte boundary, one store suffices
867 if (alignment >= StVT.getSizeInBits()/8) {
869 the_chain = low.getValue(1);
871 LoadSDNode *LN = cast<LoadSDNode>(low);
872 SDValue theValue = SN->getValue();
875 && (theValue.getOpcode() == ISD::AssertZext
876 || theValue.getOpcode() == ISD::AssertSext)) {
877 // Drill down and get the value for zero- and sign-extended
879 theValue = theValue.getOperand(0);
882 // If the base pointer is already a D-form address, then just create
883 // a new D-form address with a slot offset and the original base pointer.
884 // Otherwise generate a D-form address with the slot offset relative
885 // to the stack pointer, which is always aligned.
887 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
888 errs() << "CellSPU LowerSTORE: basePtr = ";
889 basePtr.getNode()->dump(&DAG);
894 SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
896 SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
899 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
901 DAG.getNode(ISD::BITCAST, dl,
902 MVT::v4i32, insertEltOp));
904 result = DAG.getStore(the_chain, dl, result, basePtr,
906 LN->isVolatile(), LN->isNonTemporal(),
910 // do the store when it might cross the 16 byte memory access boundary.
912 // TODO issue a warning if SN->isVolatile()== true? This is likely not
913 // what the user wanted.
915 // address offset from nearest lower 16-byte aligned address
916 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
918 DAG.getConstant(0xf, MVT::i32));
920 SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
921 DAG.getConstant( 16, MVT::i32),
923 // 16 - sizeof(Value)
924 SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
925 DAG.getConstant( 16, MVT::i32),
926 DAG.getConstant( VT.getSizeInBits()/8,
928 // get a register full of ones
929 SDValue ones = DAG.getConstant(-1, MVT::v4i32);
930 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
932 // Create the 128 bit masks that have ones where the data to store is
934 SDValue lowmask, himask;
935 // if the value to store doesn't fill up an entire 128 bits, zero
936 // out the last bits of the mask so that only the value we want to store
938 // this is e.g. in the case of store i32, align 2
940 Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
941 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
942 lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
944 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
945 Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
950 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
952 // this will zero, if there are no data that goes to the high quad
953 himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
955 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
958 // Load in the old data and zero out the parts that will be overwritten with
959 // the new data to store.
960 SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
961 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
962 DAG.getConstant( 16, PtrVT)),
964 SN->isVolatile(), SN->isNonTemporal(),
966 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
969 low = DAG.getNode(ISD::AND, dl, MVT::i128,
970 DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
971 DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
972 hi = DAG.getNode(ISD::AND, dl, MVT::i128,
973 DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
974 DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
976 // Shift the Value to store into place. rlow contains the parts that go to
977 // the lower memory chunk, rhi has the parts that go to the upper one.
978 SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
979 rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
980 SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
983 // Merge the old data and the new data and store the results
984 // Need to convert vectors here to integer as 'OR'ing floats assert
985 rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
986 DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
987 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
988 rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
989 DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
990 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
992 low = DAG.getStore(the_chain, dl, rlow, basePtr,
994 SN->isVolatile(), SN->isNonTemporal(), 16);
995 hi = DAG.getStore(the_chain, dl, rhi,
996 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
997 DAG.getConstant( 16, PtrVT)),
999 SN->isVolatile(), SN->isNonTemporal(), 16);
1000 result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
1007 //! Generate the address of a constant pool entry.
// Lower an LLVM ConstantPool SDNode to an SPU address computation.
// Under the static relocation model: small-memory targets wrap the target
// constant-pool address in an SPUISD::AFormAddr node (absolute addressing);
// large-memory targets split it into SPUISD::Hi/Lo halves joined by an
// SPUISD::IndirectAddr node. Any other relocation model is unsupported and
// reaches the llvm_unreachable below.
// NOTE(review): some original lines (else-branch introducer, function close)
// are elided in this excerpt; comments describe only the visible statements.
1009 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1010 EVT PtrVT = Op.getValueType();
1011 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1012 const Constant *C = CP->getConstVal();
// Build the target-specific constant-pool reference, preserving alignment.
1013 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
// Zero operand used as the displacement for the address-wrapper nodes.
1014 SDValue Zero = DAG.getConstant(0, PtrVT);
1015 const TargetMachine &TM = DAG.getTarget();
1016 // FIXME there is no actual debug info here
1017 DebugLoc dl = Op.getDebugLoc();
1019 if (TM.getRelocationModel() == Reloc::Static) {
1020 if (!ST->usingLargeMem()) {
1021 // Just return the SDValue with the constant pool address in it.
1022 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
// Large-memory model: combine high and low address halves indirectly.
1024 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
1025 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
1026 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
// Non-static relocation models are not implemented for CellSPU.
1030 llvm_unreachable("LowerConstantPool: Relocation model other than static"
1035 //! Alternate entry point for generating the address of a constant pool entry
// Public entry point: forwards to the file-local ::LowerConstantPool above,
// supplying the subtarget owned by the given SPUTargetMachine.
1037 SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
1038 return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
// Lower a JumpTable SDNode to an SPU address computation. Mirrors
// ::LowerConstantPool: static relocation + small memory -> AFormAddr;
// static + large memory -> Hi/Lo pair combined via IndirectAddr; any other
// relocation model hits llvm_unreachable.
// NOTE(review): the else-branch introducer and closing lines are elided in
// this excerpt.
1042 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1043 EVT PtrVT = Op.getValueType();
1044 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
// Target jump-table index used as the symbolic address operand.
1045 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1046 SDValue Zero = DAG.getConstant(0, PtrVT);
1047 const TargetMachine &TM = DAG.getTarget();
1048 // FIXME there is no actual debug info here
1049 DebugLoc dl = Op.getDebugLoc();
1051 if (TM.getRelocationModel() == Reloc::Static) {
1052 if (!ST->usingLargeMem()) {
1053 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
// Large-memory model: address formed from high/low halves.
1055 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
1056 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
1057 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
// Non-static relocation models are not implemented for CellSPU.
1061 llvm_unreachable("LowerJumpTable: Relocation model other than static"
// Lower a GlobalAddress SDNode to an SPU address computation, following the
// same pattern as ::LowerConstantPool / ::LowerJumpTable. Note this one
// preserves the node's offset (GSDN->getOffset()) in the target global
// address, and reports a fatal error (rather than llvm_unreachable) for
// unsupported relocation models.
// NOTE(review): the else-branch introducer and closing lines are elided in
// this excerpt.
1067 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1068 EVT PtrVT = Op.getValueType();
1069 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
1070 const GlobalValue *GV = GSDN->getGlobal();
1071 SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
1072 PtrVT, GSDN->getOffset());
1073 const TargetMachine &TM = DAG.getTarget();
1074 SDValue Zero = DAG.getConstant(0, PtrVT);
1075 // FIXME there is no actual debug info here
1076 DebugLoc dl = Op.getDebugLoc();
1078 if (TM.getRelocationModel() == Reloc::Static) {
1079 if (!ST->usingLargeMem()) {
// Small memory model: absolute A-form address.
1080 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
// Large-memory model: address formed from high/low halves.
1082 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
1083 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
1084 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1087 report_fatal_error("LowerGlobalAddress: Relocation model other than static"
1095 //! Custom lower double precision floating point constants
// Custom-lower an f64 ConstantFP node. The double's bit pattern is
// reinterpreted as an i64, splatted into a v2i64 BUILD_VECTOR, bitcast to
// v2f64, and the scalar result is taken from the vector's preferred slot
// (SPUISD::VEC2PREFSLOT). This lets LLVM materialize the FP constant with
// integer vector moves instead of an FP constant load.
// NOTE(review): the function's return path for non-f64 types and the closing
// brace are elided in this excerpt.
1097 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
1098 EVT VT = Op.getValueType();
1099 // FIXME there is no actual debug info here
1100 DebugLoc dl = Op.getDebugLoc();
1102 if (VT == MVT::f64) {
1103 ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
1106 "LowerConstantFP: Node is not ConstantFPSDNode");
// Reinterpret the double's bits as a 64-bit integer splat value.
1108 uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
1109 SDValue T = DAG.getConstant(dbits, MVT::i64);
1110 SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
1111 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1112 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
// Lower the incoming formal arguments of a function into DAG nodes.
// Register-assigned arguments are copied out of their physical registers via
// fresh virtual registers; stack-assigned arguments are loaded from fixed
// frame objects. For varargs functions, the remaining argument registers
// (R3..R79, all 128-bit) are spilled to consecutive stack slots so va_arg
// can walk them.
// NOTE(review): many lines (case labels of the type switch, else keywords,
// closing braces, the final return) are elided in this excerpt; the comments
// below describe only the visible statements.
1119 SPUTargetLowering::LowerFormalArguments(SDValue Chain,
1120 CallingConv::ID CallConv, bool isVarArg,
1121 const SmallVectorImpl<ISD::InputArg>
1123 DebugLoc dl, SelectionDAG &DAG,
1124 SmallVectorImpl<SDValue> &InVals)
1127 MachineFunction &MF = DAG.getMachineFunction();
1128 MachineFrameInfo *MFI = MF.getFrameInfo();
1129 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1130 SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
// Stack arguments start just past the minimal frame (linkage area).
1132 unsigned ArgOffset = SPUFrameLowering::minStackSize();
1133 unsigned ArgRegIdx = 0;
1134 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1136 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Run the SPU calling convention to assign each incoming arg a location.
1138 SmallVector<CCValAssign, 16> ArgLocs;
1139 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1140 getTargetMachine(), ArgLocs, *DAG.getContext());
1141 // FIXME: allow for other calling conventions
1142 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
1144 // Add DAG nodes to load the arguments or copy them out of registers.
1145 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
1146 EVT ObjectVT = Ins[ArgNo].VT;
1147 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1149 CCValAssign &VA = ArgLocs[ArgNo];
1151 if (VA.isRegLoc()) {
// Pick the SPU register class matching the argument's value type
// (case labels elided in this excerpt).
1152 const TargetRegisterClass *ArgRegClass;
1154 switch (ObjectVT.getSimpleVT().SimpleTy) {
1156 report_fatal_error("LowerFormalArguments Unhandled argument type: " +
1157 Twine(ObjectVT.getEVTString()));
1159 ArgRegClass = &SPU::R8CRegClass;
1162 ArgRegClass = &SPU::R16CRegClass;
1165 ArgRegClass = &SPU::R32CRegClass;
1168 ArgRegClass = &SPU::R64CRegClass;
1171 ArgRegClass = &SPU::GPRCRegClass;
1174 ArgRegClass = &SPU::R32FPRegClass;
1177 ArgRegClass = &SPU::R64FPRegClass;
1185 ArgRegClass = &SPU::VECREGRegClass;
// Bind the incoming physreg to a virtual register and copy it out.
1189 unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
1190 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1191 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
1194 // We need to load the argument to a virtual register if we determined
1195 // above that we ran out of physical registers of the appropriate type
1196 // or we're forced to do vararg
1197 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
1198 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1199 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
1200 false, false, false, 0);
1201 ArgOffset += StackSlotSize;
1204 InVals.push_back(ArgVal);
// Update the chain so subsequent loads/copies are ordered after this one.
1206 Chain = ArgVal.getOperand(0);
1211 // FIXME: we should be able to query the argument registers from
1212 // tablegen generated code.
1213 static const unsigned ArgRegs[] = {
1214 SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
1215 SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
1216 SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
1217 SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
1218 SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
1219 SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
1220 SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
1221 SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
1222 SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
1223 SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
1224 SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
1226 // size of ArgRegs array
// 77 == (79 - 3) + 1, matching the R3..R79 list above.
1227 unsigned NumArgRegs = 77;
1229 // We will spill (79-3)+1 registers to the stack
1230 SmallVector<SDValue, 79-3+1> MemOps;
1232 // Create the frame slot
// Spill every remaining argument register to its own 16-byte stack slot;
// the last-created slot index becomes the varargs frame index.
1233 for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
1234 FuncInfo->setVarArgsFrameIndex(
1235 MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
1236 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1237 unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::VECREGRegClass);
1238 SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
1239 SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
1241 Chain = Store.getOperand(0);
1242 MemOps.push_back(Store);
1244 // Increment address by stack slot size for the next stored argument
1245 ArgOffset += StackSlotSize;
// Tie all the vararg spill stores together into the chain.
1247 if (!MemOps.empty())
1248 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1249 &MemOps[0], MemOps.size());
1255 /// isLSAAddress - Return the immediate to use if the specified
1256 /// value is representable as a LSA address.
1255 /// isLSAAddress - Return the immediate to use if the specified
1256 /// value is representable as a LSA address.
// Returns the word-address (byte address >> 2) as an MVT::i32 constant node
// when Op is a constant that is 4-byte aligned and whose value survives a
// sign-extension from 18 bits ((Addr << 14 >> 14) == Addr); otherwise 0.
// NOTE(review): the null-check after the dyn_cast is elided in this excerpt
// (line numbering jumps 1258 -> 1261) — presumably "if (!C) return 0;".
1257 static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
1258 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1261 int Addr = C->getZExtValue();
1262 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1263 (Addr << 14 >> 14) != Addr)
1264 return 0; // Top 14 bits have to be sext of immediate.
// Convert byte address to word address for the immediate field.
1266 return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
// Lower an outgoing call: assign arguments to registers/stack per the SPU
// calling convention, emit the CALLSEQ_START/CALL/CALLSEQ_END sequence,
// select BRSL/BRASL-style addressing for the callee, and copy return values
// out of their registers into InVals.
// NOTE(review): numerous lines (switch case labels, else keywords, closing
// braces, intermediate returns) are elided in this excerpt; comments
// describe only the visible statements.
1270 SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1271 CallingConv::ID CallConv, bool isVarArg,
1273 const SmallVectorImpl<ISD::OutputArg> &Outs,
1274 const SmallVectorImpl<SDValue> &OutVals,
1275 const SmallVectorImpl<ISD::InputArg> &Ins,
1276 DebugLoc dl, SelectionDAG &DAG,
1277 SmallVectorImpl<SDValue> &InVals) const {
1278 // CellSPU target does not yet support tail call optimization.
1281 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
1282 unsigned NumOps = Outs.size();
1283 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
// Run the SPU calling convention over the outgoing arguments.
1285 SmallVector<CCValAssign, 16> ArgLocs;
1286 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1287 getTargetMachine(), ArgLocs, *DAG.getContext());
1288 // FIXME: allow for other calling conventions
1289 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
1291 const unsigned NumArgRegs = ArgLocs.size();
1294 // Handy pointer type
1295 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1297 // Set up a copy of the stack pointer for use loading and storing any
1298 // arguments that may not fit in the registers available for argument
1300 SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
1302 // Figure out which arguments are going to go in registers, and which in
// Stack arguments begin just past the linkage area ([LR]/[SP]).
1304 unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
1305 unsigned ArgRegIdx = 0;
1307 // Keep track of registers passing arguments
1308 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
1309 // And the arguments passed on the stack
1310 SmallVector<SDValue, 8> MemOpChains;
1312 for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
1313 SDValue Arg = OutVals[ArgRegIdx];
1314 CCValAssign &VA = ArgLocs[ArgRegIdx];
1316 // PtrOff will be used to store the current argument to the stack if a
1317 // register cannot be found for it.
1318 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1319 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
// Type dispatch (case labels elided in this excerpt).
1321 switch (Arg.getValueType().getSimpleVT().SimpleTy) {
1322 default: llvm_unreachable("Unexpected ValueType for argument!");
// While argument registers remain, pass in a register; otherwise store
// the argument to its stack slot and advance the offset.
1336 if (ArgRegIdx != NumArgRegs) {
1337 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1339 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
1340 MachinePointerInfo(),
1342 ArgOffset += StackSlotSize;
1348 // Accumulate how many bytes are to be pushed on the stack, including the
1349 // linkage area, and parameter passing area. According to the SPU ABI,
1350 // we minimally need space for [LR] and [SP].
1351 unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();
1353 // Insert a call sequence start
1354 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
1357 if (!MemOpChains.empty()) {
1358 // Adjust the stack pointer for the stack arguments.
1359 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1360 &MemOpChains[0], MemOpChains.size());
1363 // Build a sequence of copy-to-reg nodes chained together with token chain
1364 // and flag operands which copy the outgoing args into the appropriate regs.
1366 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1367 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1368 RegsToPass[i].second, InFlag);
1369 InFlag = Chain.getValue(1);
1372 SmallVector<SDValue, 8> Ops;
1373 unsigned CallOpc = SPUISD::CALL;
1375 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1376 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1377 // node so that legalize doesn't hack it.
1378 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1379 const GlobalValue *GV = G->getGlobal();
1380 EVT CalleeVT = Callee.getValueType();
1381 SDValue Zero = DAG.getConstant(0, PtrVT);
1382 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
1384 if (!ST->usingLargeMem()) {
1385 // Turn calls to targets that are defined (i.e., have bodies) into BRSL
1386 // style calls, otherwise, external symbols are BRASL calls. This assumes
1387 // that declared/defined symbols are in the same compilation unit and can
1388 // be reached through PC-relative jumps.
1391 // This may be an unsafe assumption for JIT and really large compilation
1393 if (GV->isDeclaration()) {
// External declaration: absolute (A-form) call address.
1394 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
// Defined in this unit: PC-relative call address.
1396 Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
1399 // "Large memory" mode: Turn all calls into indirect calls with a X-form
1401 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
1403 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1404 EVT CalleeVT = Callee.getValueType();
1405 SDValue Zero = DAG.getConstant(0, PtrVT);
1406 SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
1407 Callee.getValueType());
1409 if (!ST->usingLargeMem()) {
1410 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
1412 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
1414 } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
1415 // If this is an absolute destination address that appears to be a legal
1416 // local store address, use the munged value.
1417 Callee = SDValue(Dest, 0);
// Assemble the CALL node operands: chain, callee, then the argument
// registers (so the register allocator knows they are live across the call).
1420 Ops.push_back(Chain);
1421 Ops.push_back(Callee);
1423 // Add argument registers to the end of the list so that they are known live
1425 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1426 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1427 RegsToPass[i].second.getValueType()));
1429 if (InFlag.getNode())
1430 Ops.push_back(InFlag);
1431 // Returns a chain and a flag for retval copy to use.
1432 Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
1433 &Ops[0], Ops.size());
1434 InFlag = Chain.getValue(1);
1436 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
1437 DAG.getIntPtrConstant(0, true), InFlag);
1439 InFlag = Chain.getValue(1);
1441 // If the function returns void, just return the chain.
1445 // Now handle the return value(s)
1446 SmallVector<CCValAssign, 16> RVLocs;
1447 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1448 getTargetMachine(), RVLocs, *DAG.getContext());
1449 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
1452 // If the call has results, copy the values out of the ret val registers.
1453 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1454 CCValAssign VA = RVLocs[i];
// Copy each result out of its return register; thread the chain and glue
// so the copies stay ordered immediately after the call.
1456 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1458 Chain = Val.getValue(1);
1459 InFlag = Val.getValue(2);
1460 InVals.push_back(Val);
// Lower a function return: analyze return locations with RetCC_SPU, record
// the return registers as function live-outs (first return lowered only),
// copy each return value into its register, and emit the SPUISD::RET_FLAG
// node (glued to the last copy when there is one).
// NOTE(review): a few lines (the Flag declaration, the flag-validity check
// before the two return forms, closing braces) are elided in this excerpt.
1467 SPUTargetLowering::LowerReturn(SDValue Chain,
1468 CallingConv::ID CallConv, bool isVarArg,
1469 const SmallVectorImpl<ISD::OutputArg> &Outs,
1470 const SmallVectorImpl<SDValue> &OutVals,
1471 DebugLoc dl, SelectionDAG &DAG) const {
1473 SmallVector<CCValAssign, 16> RVLocs;
1474 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1475 getTargetMachine(), RVLocs, *DAG.getContext());
1476 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
1478 // If this is the first return lowered for this function, add the regs to the
1479 // liveout set for the function.
1480 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1481 for (unsigned i = 0; i != RVLocs.size(); ++i)
1482 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1487 // Copy the result values into the output registers.
1488 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1489 CCValAssign &VA = RVLocs[i];
1490 assert(VA.isRegLoc() && "Can only return in registers!");
1491 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
// Glue successive copies so they stay adjacent to the return.
1493 Flag = Chain.getValue(1);
// With return values: glue the RET_FLAG to the last copy; without: plain.
1497 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1499 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1503 //===----------------------------------------------------------------------===//
1504 // Vector related lowering:
1505 //===----------------------------------------------------------------------===//
// Helper for the SPU::get_vec_* functions below: if the given BUILD_VECTOR
// node has exactly one distinct non-undef operand and that operand is a
// ConstantSDNode, return it; otherwise the function yields null (the return
// statements are elided in this excerpt).
1507 static ConstantSDNode *
1508 getVecImm(SDNode *N) {
1509 SDValue OpVal(0, 0);
1511 // Check to see if this buildvec has a single non-undef value in its elements.
1512 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1513 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1514 if (OpVal.getNode() == 0)
1515 OpVal = N->getOperand(i);
// A second, different value means this is not a uniform splat.
1516 else if (OpVal != N->getOperand(i))
1520 if (OpVal.getNode() != 0) {
1521 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1529 /// get_vec_i18imm - Test if this vector is a vector filled with the same value
1530 /// and the value fits into an unsigned 18-bit constant, and if so, return the
// If N is a uniform constant splat whose value fits in an unsigned 18-bit
// immediate, return it as a target constant of ValueType; otherwise the
// failure return (elided here) yields an empty SDValue.
// NOTE(review): for i64, a guard comparing `upper` and `lower` is elided in
// this excerpt (line numbering jumps 1539 -> 1542) — presumably both 32-bit
// halves must match before the value is narrowed with the shift below.
1532 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
1534 if (ConstantSDNode *CN = getVecImm(N)) {
1535 uint64_t Value = CN->getZExtValue();
1536 if (ValueType == MVT::i64) {
1537 uint64_t UValue = CN->getZExtValue();
1538 uint32_t upper = uint32_t(UValue >> 32);
1539 uint32_t lower = uint32_t(UValue);
// Narrow the splat to its (identical) upper 32-bit half.
1542 Value = Value >> 32;
// 0x3ffff == (1 << 18) - 1: unsigned 18-bit immediate range.
1544 if (Value <= 0x3ffff)
1545 return DAG.getTargetConstant(Value, ValueType);
1551 /// get_vec_i16imm - Test if this vector is a vector filled with the same value
1552 /// and the value fits into a signed 16-bit constant, and if so, return the
// constant as a target constant of ValueType; otherwise the elided failure
// path yields an empty SDValue.
// NOTE(review): as in get_vec_u18imm, an upper/lower equality guard for the
// i64 case appears to be elided here (line numbering jumps 1561 -> 1564).
1554 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
1556 if (ConstantSDNode *CN = getVecImm(N)) {
1557 int64_t Value = CN->getSExtValue();
1558 if (ValueType == MVT::i64) {
1559 uint64_t UValue = CN->getZExtValue();
1560 uint32_t upper = uint32_t(UValue >> 32);
1561 uint32_t lower = uint32_t(UValue);
// Narrow the splat to its (identical) upper 32-bit half.
1564 Value = Value >> 32;
// Signed 16-bit range check: [-32768, 32767].
1566 if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
1567 return DAG.getTargetConstant(Value, ValueType);
1574 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
1575 /// and the value fits into a signed 10-bit constant, and if so, return the
// constant as a target constant of ValueType; otherwise the elided failure
// path yields an empty SDValue.
// NOTE(review): as in the siblings above, an upper/lower equality guard for
// the i64 case appears to be elided (line numbering jumps 1584 -> 1587).
1577 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1579 if (ConstantSDNode *CN = getVecImm(N)) {
1580 int64_t Value = CN->getSExtValue();
1581 if (ValueType == MVT::i64) {
1582 uint64_t UValue = CN->getZExtValue();
1583 uint32_t upper = uint32_t(UValue >> 32);
1584 uint32_t lower = uint32_t(UValue);
// Narrow the splat to its (identical) upper 32-bit half.
1587 Value = Value >> 32;
// Signed 10-bit immediate range check via isInt<10>.
1589 if (isInt<10>(Value))
1590 return DAG.getTargetConstant(Value, ValueType);
1596 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
1597 /// and the value fits into a signed 8-bit constant, and if so, return the
1600 /// @note: The incoming vector is v16i8 because that's the only way we can load
1601 /// constant vectors. Thus, we test to see if the upper and lower bytes are the
// same (for i16) or whether the value already fits in 8 bits (for i8); on
// success the 8-bit value is returned as a target constant, otherwise the
// elided failure path yields an empty SDValue.
1603 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1605 if (ConstantSDNode *CN = getVecImm(N)) {
1606 int Value = (int) CN->getZExtValue();
// i16 case: accept only when the high byte equals the low byte, i.e. the
// 16-bit splat is really a replicated 8-bit pattern.
1607 if (ValueType == MVT::i16
1608 && Value <= 0xffff /* truncated from uint64_t */
1609 && ((short) Value >> 8) == ((short) Value & 0xff))
1610 return DAG.getTargetConstant(Value & 0xff, ValueType);
1611 else if (ValueType == MVT::i8
1612 && (Value & 0xff) == Value)
1613 return DAG.getTargetConstant(Value, ValueType);
1619 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1620 /// and the value fits into a signed 16-bit constant, and if so, return the
// value shifted right by 16 (the ILHU "load halfword upper" immediate);
// otherwise the elided failure path yields an empty SDValue.
1622 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1624 if (ConstantSDNode *CN = getVecImm(N)) {
1625 uint64_t Value = CN->getZExtValue();
// Accept only values whose low 16 bits are zero (pure upper-halfword
// pattern). NOTE(review): for i64 the mask 0xffff0000 covers only the low
// 32 bits, so a value with any of bits 32..63 set is rejected here —
// confirm this restriction is intended rather than a missing wide mask.
1626 if ((ValueType == MVT::i32
1627 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1628 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1629 return DAG.getTargetConstant(Value >> 16, ValueType);
1635 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
// Returns the uniform splat value of N as a 32-bit target constant, or (via
// the elided failure path) an empty SDValue when N is not a constant splat.
1636 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1637 if (ConstantSDNode *CN = getVecImm(N)) {
1638 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1644 /// get_v4i32_imm - Catch-all for general 64-bit constant vectors
// Returns the uniform splat value of N as an MVT::i64 target constant, or
// (via the elided failure path) an empty SDValue.
// NOTE(review): the (unsigned) cast truncates the 64-bit splat to its low
// 32 bits before widening back to i64 — confirm whether full 64-bit values
// are intended to survive here.
1645 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1646 if (ConstantSDNode *CN = getVecImm(N)) {
1647 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
1653 //! Lower a BUILD_VECTOR instruction creatively:
// Custom-lower a BUILD_VECTOR: if the node is a uniform constant splat, emit
// the cheapest SPU materialization for its element type — integer splats as
// BUILD_VECTORs of the replicated constant, FP splats via integer bitcasts,
// and v2i64 through the specialized SPU::LowerV2I64Splat path. Non-splat
// vectors fall through (elided return) to default handling.
// NOTE(review): case labels of the type switch and several closing braces
// are elided in this excerpt; comments describe only the visible statements.
1655 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1656 EVT VT = Op.getValueType();
1657 EVT EltVT = VT.getVectorElementType();
1658 DebugLoc dl = Op.getDebugLoc();
1659 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1660 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1661 unsigned minSplatBits = EltVT.getSizeInBits();
// Splats narrower than 16 bits are analyzed at 16-bit granularity
// (the widening assignment itself is elided in this excerpt).
1663 if (minSplatBits < 16)
1666 APInt APSplatBits, APSplatUndef;
1667 unsigned SplatBitSize;
// Ask the BuildVectorSDNode whether it is a constant splat at the
// requested minimum element width.
1670 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1671 HasAnyUndefs, minSplatBits)
1672 || minSplatBits < SplatBitSize)
1673 return SDValue(); // Wasn't a constant vector or splat exceeded min
1675 uint64_t SplatBits = APSplatBits.getZExtValue();
1677 switch (VT.getSimpleVT().SimpleTy) {
1679 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1680 Twine(VT.getEVTString()));
// v4f32 splat: materialize as an integer v4i32 splat, then bitcast.
1683 uint32_t Value32 = uint32_t(SplatBits);
1684 assert(SplatBitSize == 32
1685 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1686 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1687 SDValue T = DAG.getConstant(Value32, MVT::i32);
1688 return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
1689 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
// v2f64 splat: same trick at 64-bit width.
1693 uint64_t f64val = uint64_t(SplatBits);
1694 assert(SplatBitSize == 64
1695 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1696 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1697 SDValue T = DAG.getConstant(f64val, MVT::i64);
1698 return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
1699 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
// v16i8 splat:
1703 // 8-bit constants have to be expanded to 16-bits
1704 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1705 SmallVector<SDValue, 8> Ops;
1707 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1708 return DAG.getNode(ISD::BITCAST, dl, VT,
1709 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
// v8i16 splat (the Ops.assign for this case is elided in this excerpt).
1712 unsigned short Value16 = SplatBits;
1713 SDValue T = DAG.getConstant(Value16, EltVT);
1714 SmallVector<SDValue, 8> Ops;
1717 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
// v4i32 splat.
1720 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1721 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
// v2i64 splat: delegate to the specialized shuffle-based lowering.
1724 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
// Lower a v2i64 splat of SplatVal. Three strategies, cheapest first:
//  1. upper 32-bit half == lower half: a v4i32 splat of that word, bitcast
//     to OpVT (matchable by IL/ILA et al.).
//  2. both halves are "special" shuffle patterns (0, 0xffffffff,
//     0x80000000): a v4i32 BUILD_VECTOR, i.e. a constant pool load.
//  3. otherwise: build whichever halves are non-special as v4i32 splats and
//     combine them with a SHUFB whose byte mask selects/synthesizes each
//     byte (0x80 -> zero byte, 0xc0 -> 0xff byte, 0xe0 -> 0x80 byte,
//     per the C*D shuffle-mask special encodings).
// NOTE(review): several lines (variable declarations for LO32/HI32/val,
// the special-case operand aliasing, some closing braces) are elided in
// this excerpt.
1734 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1736 uint32_t upper = uint32_t(SplatVal >> 32);
1737 uint32_t lower = uint32_t(SplatVal);
1739 if (upper == lower) {
1740 // Magic constant that can be matched by IL, ILA, et. al.
1741 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1742 return DAG.getNode(ISD::BITCAST, dl, OpVT,
1743 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1744 Val, Val, Val, Val));
1746 bool upper_special, lower_special;
1748 // NOTE: This code creates common-case shuffle masks that can be easily
1749 // detected as common expressions. It is not attempting to create highly
1750 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
1752 // Detect if the upper or lower half is a special shuffle mask pattern:
1753 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1754 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
1756 // Both upper and lower are special, lower to a constant pool load:
1757 if (lower_special && upper_special) {
1758 SDValue UpperVal = DAG.getConstant(upper, MVT::i32);
1759 SDValue LowerVal = DAG.getConstant(lower, MVT::i32);
1760 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1761 UpperVal, LowerVal, UpperVal, LowerVal);
1762 return DAG.getNode(ISD::BITCAST, dl, OpVT, BV);
1767 SmallVector<SDValue, 16> ShufBytes;
1770 // Create lower vector if not a special pattern
1771 if (!lower_special) {
1772 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1773 LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1774 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1775 LO32C, LO32C, LO32C, LO32C));
1778 // Create upper vector if not a special pattern
1779 if (!upper_special) {
1780 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1781 HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1782 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1783 HI32C, HI32C, HI32C, HI32C));
1786 // If either upper or lower are special, then the two input operands are
1787 // the same (basically, one of them is a "don't care")
// Build the 16-byte SHUFB control word, one 32-bit mask word per output
// word; even words take the upper half, odd words the lower half.
1793 for (int i = 0; i < 4; ++i) {
1795 for (int j = 0; j < 4; ++j) {
1797 bool process_upper, process_lower;
1799 process_upper = (upper_special && (i & 1) == 0);
1800 process_lower = (lower_special && (i & 1) == 1);
1802 if (process_upper || process_lower) {
// Special bytes are synthesized by the shuffle-mask encodings:
// 0x80 -> 0x00, 0xc0 -> 0xff, 0xe0 -> 0x80 (high byte) / 0x80 (rest);
// the first two assignments are elided in this excerpt.
1803 if ((process_upper && upper == 0)
1804 || (process_lower && lower == 0))
1806 else if ((process_upper && upper == 0xffffffff)
1807 || (process_lower && lower == 0xffffffff))
1809 else if ((process_upper && upper == 0x80000000)
1810 || (process_lower && lower == 0x80000000))
1811 val |= (j == 0 ? 0xe0 : 0x80);
// Non-special byte: select the source byte from HI32/LO32 by index.
1813 val |= i * 4 + j + ((i & 1) * 16);
1816 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1819 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1820 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1821 &ShufBytes[0], ShufBytes.size()));
1825 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1826 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1827 /// permutation vector, V3, is monotonically increasing with one "exception"
1828 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1829 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1830 /// In either case, the net result is going to eventually invoke SHUFB to
1831 /// permute/shuffle the bytes from V1 and V2.
1833 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which
1834 /// generate the control word for byte/halfword/word insertion. This takes care
1835 /// of a single element move from V2 into V1.
1837 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
1838 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1839 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1840 SDValue V1 = Op.getOperand(0);
1841 SDValue V2 = Op.getOperand(1);
1842 DebugLoc dl = Op.getDebugLoc();
1844 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1846 // If we have a single element being moved from V1 to V2, this can be handled
1847 // using the C*[DX] compute mask instructions, but the vector elements have
1848 // to be monotonically increasing with one exception element, and the source
1849 // slot of the element to move must be the same as the destination.
1850 EVT VecVT = V1.getValueType();
1851 EVT EltVT = VecVT.getVectorElementType();
1852 unsigned EltsFromV2 = 0;
1853 unsigned V2EltOffset = 0;
1854 unsigned V2EltIdx0 = 0;
1855 unsigned CurrElt = 0;
1856 unsigned MaxElts = VecVT.getVectorNumElements();
1857 unsigned PrevElt = 0;
1858 bool monotonic = true;
1861 EVT maskVT; // which of the c?d instructions to use
1863 if (EltVT == MVT::i8) {
1865 maskVT = MVT::v16i8;
1866 } else if (EltVT == MVT::i16) {
1868 maskVT = MVT::v8i16;
1869 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1871 maskVT = MVT::v4i32;
1872 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1874 maskVT = MVT::v2i64;
1876 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1878 for (unsigned i = 0; i != MaxElts; ++i) {
1879 if (SVN->getMaskElt(i) < 0)
1882 unsigned SrcElt = SVN->getMaskElt(i);
1885 if (SrcElt >= V2EltIdx0) {
1886 // TODO: optimize for the monotonic case when several consecutive
1887 // elements are taken form V2. Do we ever get such a case?
1888 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1889 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1893 } else if (CurrElt != SrcElt) {
1901 if (PrevElt > 0 && SrcElt < MaxElts) {
1902 if ((PrevElt == SrcElt - 1)
1903 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1908 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1909 // First time or after a "wrap around"
1913 // This isn't a rotation, takes elements from vector 2
1919 if (EltsFromV2 == 1 && monotonic) {
1920 // Compute mask and shuffle
1921 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1923 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1924 // R1 ($sp) is used here only as it is guaranteed to have last bits zero
1925 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1926 DAG.getRegister(SPU::R1, PtrVT),
1927 DAG.getConstant(V2EltOffset, MVT::i32));
1928 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1931 // Use shuffle mask in SHUFB synthetic instruction:
1932 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1934 } else if (rotate) {
1937 rotamt *= EltVT.getSizeInBits()/8;
1938 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1939 V1, DAG.getConstant(rotamt, MVT::i16));
1941 // Convert the SHUFFLE_VECTOR mask's input element units to the
1943 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1945 SmallVector<SDValue, 16> ResultMask;
1946 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1947 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1949 for (unsigned j = 0; j < BytesPerElement; ++j)
1950 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1952 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1953 &ResultMask[0], ResultMask.size());
1954 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
1958 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1959 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1960 DebugLoc dl = Op.getDebugLoc();
1962 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1963 // For a constant, build the appropriate constant vector, which will
1964 // eventually simplify to a vector register load.
1966 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1967 SmallVector<SDValue, 16> ConstVecValues;
1971 // Create a constant vector:
1972 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1973 default: llvm_unreachable("Unexpected constant value type in "
1974 "LowerSCALAR_TO_VECTOR");
1975 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1976 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1977 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1978 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1979 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1980 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1983 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1984 for (size_t j = 0; j < n_copies; ++j)
1985 ConstVecValues.push_back(CValue);
1987 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1988 &ConstVecValues[0], ConstVecValues.size());
1990 // Otherwise, copy the value from one register to another:
1991 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1992 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
1999 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
2006 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2007 EVT VT = Op.getValueType();
2008 SDValue N = Op.getOperand(0);
2009 SDValue Elt = Op.getOperand(1);
2010 DebugLoc dl = Op.getDebugLoc();
2013 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2014 // Constant argument:
2015 int EltNo = (int) C->getZExtValue();
2018 if (VT == MVT::i8 && EltNo >= 16)
2019 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
2020 else if (VT == MVT::i16 && EltNo >= 8)
2021 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
2022 else if (VT == MVT::i32 && EltNo >= 4)
2023 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
2024 else if (VT == MVT::i64 && EltNo >= 2)
2025 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
2027 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
2028 // i32 and i64: Element 0 is the preferred slot
2029 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
2032 // Need to generate shuffle mask and extract:
2033 int prefslot_begin = -1, prefslot_end = -1;
2034 int elt_byte = EltNo * VT.getSizeInBits() / 8;
2036 switch (VT.getSimpleVT().SimpleTy) {
2038 assert(false && "Invalid value type!");
2040 prefslot_begin = prefslot_end = 3;
2044 prefslot_begin = 2; prefslot_end = 3;
2049 prefslot_begin = 0; prefslot_end = 3;
2054 prefslot_begin = 0; prefslot_end = 7;
2059 assert(prefslot_begin != -1 && prefslot_end != -1 &&
2060 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
2062 unsigned int ShufBytes[16] = {
2063 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2065 for (int i = 0; i < 16; ++i) {
2066 // zero fill uppper part of preferred slot, don't care about the
2068 unsigned int mask_val;
2069 if (i <= prefslot_end) {
2071 ((i < prefslot_begin)
2073 : elt_byte + (i - prefslot_begin));
2075 ShufBytes[i] = mask_val;
2077 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
2080 SDValue ShufMask[4];
2081 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
2082 unsigned bidx = i * 4;
2083 unsigned int bits = ((ShufBytes[bidx] << 24) |
2084 (ShufBytes[bidx+1] << 16) |
2085 (ShufBytes[bidx+2] << 8) |
2087 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
2090 SDValue ShufMaskVec =
2091 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2092 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
2094 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2095 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
2096 N, N, ShufMaskVec));
2098 // Variable index: Rotate the requested element into slot 0, then replicate
2099 // slot 0 across the vector
2100 EVT VecVT = N.getValueType();
2101 if (!VecVT.isSimple() || !VecVT.isVector()) {
2102 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
2106 // Make life easier by making sure the index is zero-extended to i32
2107 if (Elt.getValueType() != MVT::i32)
2108 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2110 // Scale the index to a bit/byte shift quantity
2112 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2113 unsigned scaleShift = scaleFactor.logBase2();
2116 if (scaleShift > 0) {
2117 // Scale the shift factor:
2118 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2119 DAG.getConstant(scaleShift, MVT::i32));
2122 vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
2124 // Replicate the bytes starting at byte 0 across the entire vector (for
2125 // consistency with the notion of a unified register set)
2128 switch (VT.getSimpleVT().SimpleTy) {
2130 report_fatal_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
2134 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2135 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2136 factor, factor, factor, factor);
2140 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2141 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2142 factor, factor, factor, factor);
2147 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2148 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2149 factor, factor, factor, factor);
2154 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2155 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2156 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2157 loFactor, hiFactor, loFactor, hiFactor);
2162 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2163 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2164 vecShift, vecShift, replicate));
2170 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2171 SDValue VecOp = Op.getOperand(0);
2172 SDValue ValOp = Op.getOperand(1);
2173 SDValue IdxOp = Op.getOperand(2);
2174 DebugLoc dl = Op.getDebugLoc();
2175 EVT VT = Op.getValueType();
2176 EVT eltVT = ValOp.getValueType();
2178 // use 0 when the lane to insert to is 'undef'
2180 if (IdxOp.getOpcode() != ISD::UNDEF) {
2181 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2182 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2183 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2186 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2187 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2188 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2189 DAG.getRegister(SPU::R1, PtrVT),
2190 DAG.getConstant(Offset, PtrVT));
2191 // widen the mask when dealing with half vectors
2192 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2193 128/ VT.getVectorElementType().getSizeInBits());
2194 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
2197 DAG.getNode(SPUISD::SHUFB, dl, VT,
2198 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2200 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
2205 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2206 const TargetLowering &TLI)
2208 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2209 DebugLoc dl = Op.getDebugLoc();
2210 EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
2212 assert(Op.getValueType() == MVT::i8);
2215 llvm_unreachable("Unhandled i8 math operator");
2219 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2221 SDValue N1 = Op.getOperand(1);
2222 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2223 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2224 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2225 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2230 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2232 SDValue N1 = Op.getOperand(1);
2233 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2234 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2235 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2236 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2240 SDValue N1 = Op.getOperand(1);
2241 EVT N1VT = N1.getValueType();
2243 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2244 if (!N1VT.bitsEq(ShiftVT)) {
2245 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2248 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2251 // Replicate lower 8-bits into upper 8:
2253 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2254 DAG.getNode(ISD::SHL, dl, MVT::i16,
2255 N0, DAG.getConstant(8, MVT::i32)));
2257 // Truncate back down to i8
2258 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2259 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
2263 SDValue N1 = Op.getOperand(1);
2264 EVT N1VT = N1.getValueType();
2266 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2267 if (!N1VT.bitsEq(ShiftVT)) {
2268 unsigned N1Opc = ISD::ZERO_EXTEND;
2270 if (N1.getValueType().bitsGT(ShiftVT))
2271 N1Opc = ISD::TRUNCATE;
2273 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2276 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2277 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2280 SDValue N1 = Op.getOperand(1);
2281 EVT N1VT = N1.getValueType();
2283 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2284 if (!N1VT.bitsEq(ShiftVT)) {
2285 unsigned N1Opc = ISD::SIGN_EXTEND;
2287 if (N1VT.bitsGT(ShiftVT))
2288 N1Opc = ISD::TRUNCATE;
2289 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2292 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2293 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2296 SDValue N1 = Op.getOperand(1);
2298 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2299 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2300 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2301 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2309 //! Lower byte immediate operations for v16i8 vectors:
2311 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2314 EVT VT = Op.getValueType();
2315 DebugLoc dl = Op.getDebugLoc();
2317 ConstVec = Op.getOperand(0);
2318 Arg = Op.getOperand(1);
2319 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2320 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2321 ConstVec = ConstVec.getOperand(0);
2323 ConstVec = Op.getOperand(1);
2324 Arg = Op.getOperand(0);
2325 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2326 ConstVec = ConstVec.getOperand(0);
2331 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2332 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2333 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2335 APInt APSplatBits, APSplatUndef;
2336 unsigned SplatBitSize;
2338 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2340 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2341 HasAnyUndefs, minSplatBits)
2342 && minSplatBits <= SplatBitSize) {
2343 uint64_t SplatBits = APSplatBits.getZExtValue();
2344 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2346 SmallVector<SDValue, 16> tcVec;
2347 tcVec.assign(16, tc);
2348 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2349 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2353 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2354 // lowered. Return the operation, rather than a null SDValue.
2358 //! Custom lowering for CTPOP (count population)
2360 Custom lowering code that counts the number of ones in the input
2361 operand. SPU has such an instruction, but it counts the number of
2362 ones per byte, which then have to be accumulated.
2364 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2365 EVT VT = Op.getValueType();
2366 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2367 VT, (128 / VT.getSizeInBits()));
2368 DebugLoc dl = Op.getDebugLoc();
2370 switch (VT.getSimpleVT().SimpleTy) {
2372 assert(false && "Invalid value type!");
2374 SDValue N = Op.getOperand(0);
2375 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2377 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2378 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2380 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2384 MachineFunction &MF = DAG.getMachineFunction();
2385 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2387 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2389 SDValue N = Op.getOperand(0);
2390 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2391 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2392 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2394 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2395 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2397 // CNTB_result becomes the chain to which all of the virtual registers
2398 // CNTB_reg, SUM1_reg become associated:
2399 SDValue CNTB_result =
2400 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2402 SDValue CNTB_rescopy =
2403 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2405 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2407 return DAG.getNode(ISD::AND, dl, MVT::i16,
2408 DAG.getNode(ISD::ADD, dl, MVT::i16,
2409 DAG.getNode(ISD::SRL, dl, MVT::i16,
2416 MachineFunction &MF = DAG.getMachineFunction();
2417 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2419 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2420 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2422 SDValue N = Op.getOperand(0);
2423 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2424 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2425 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2426 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2428 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2429 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2431 // CNTB_result becomes the chain to which all of the virtual registers
2432 // CNTB_reg, SUM1_reg become associated:
2433 SDValue CNTB_result =
2434 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2436 SDValue CNTB_rescopy =
2437 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2440 DAG.getNode(ISD::SRL, dl, MVT::i32,
2441 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2445 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2446 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2448 SDValue Sum1_rescopy =
2449 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2452 DAG.getNode(ISD::SRL, dl, MVT::i32,
2453 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2456 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2457 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2459 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2469 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2471 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2472 All conversions to i64 are expanded to a libcall.
2474 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2475 const SPUTargetLowering &TLI) {
2476 EVT OpVT = Op.getValueType();
2477 SDValue Op0 = Op.getOperand(0);
2478 EVT Op0VT = Op0.getValueType();
2480 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2481 || OpVT == MVT::i64) {
2482 // Convert f32 / f64 to i32 / i64 via libcall.
2484 (Op.getOpcode() == ISD::FP_TO_SINT)
2485 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2486 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2487 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2489 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2495 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2497 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2498 All conversions from i64 are expanded to a libcall.
2500 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2501 const SPUTargetLowering &TLI) {
2502 EVT OpVT = Op.getValueType();
2503 SDValue Op0 = Op.getOperand(0);
2504 EVT Op0VT = Op0.getValueType();
2506 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2507 || Op0VT == MVT::i64) {
2508 // Convert i32, i64 to f64 via libcall:
2510 (Op.getOpcode() == ISD::SINT_TO_FP)
2511 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2512 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2513 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd int-to-fp conversion!");
2515 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2521 //! Lower ISD::SETCC
2523 This handles MVT::f64 (double floating point) condition lowering
2525 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2526 const TargetLowering &TLI) {
2527 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2528 DebugLoc dl = Op.getDebugLoc();
2529 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2531 SDValue lhs = Op.getOperand(0);
2532 SDValue rhs = Op.getOperand(1);
2533 EVT lhsVT = lhs.getValueType();
2534 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
2536 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2537 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2538 EVT IntVT(MVT::i64);
2540 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2541 // selected to a NOP:
2542 SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
2544 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2545 DAG.getNode(ISD::SRL, dl, IntVT,
2546 i64lhs, DAG.getConstant(32, MVT::i32)));
2547 SDValue lhsHi32abs =
2548 DAG.getNode(ISD::AND, dl, MVT::i32,
2549 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2551 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2553 // SETO and SETUO only use the lhs operand:
2554 if (CC->get() == ISD::SETO) {
2555 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2557 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2558 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2559 DAG.getSetCC(dl, ccResultVT,
2560 lhs, DAG.getConstantFP(0.0, lhsVT),
2562 DAG.getConstant(ccResultAllOnes, ccResultVT));
2563 } else if (CC->get() == ISD::SETUO) {
2564 // Evaluates to true if Op0 is [SQ]NaN
2565 return DAG.getNode(ISD::AND, dl, ccResultVT,
2566 DAG.getSetCC(dl, ccResultVT,
2568 DAG.getConstant(0x7ff00000, MVT::i32),
2570 DAG.getSetCC(dl, ccResultVT,
2572 DAG.getConstant(0, MVT::i32),
2576 SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
2578 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2579 DAG.getNode(ISD::SRL, dl, IntVT,
2580 i64rhs, DAG.getConstant(32, MVT::i32)));
2582 // If a value is negative, subtract from the sign magnitude constant:
2583 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2585 // Convert the sign-magnitude representation into 2's complement:
2586 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2587 lhsHi32, DAG.getConstant(31, MVT::i32));
2588 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2590 DAG.getNode(ISD::SELECT, dl, IntVT,
2591 lhsSelectMask, lhsSignMag2TC, i64lhs);
2593 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2594 rhsHi32, DAG.getConstant(31, MVT::i32));
2595 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2597 DAG.getNode(ISD::SELECT, dl, IntVT,
2598 rhsSelectMask, rhsSignMag2TC, i64rhs);
2602 switch (CC->get()) {
2605 compareOp = ISD::SETEQ; break;
2608 compareOp = ISD::SETGT; break;
2611 compareOp = ISD::SETGE; break;
2614 compareOp = ISD::SETLT; break;
2617 compareOp = ISD::SETLE; break;
2620 compareOp = ISD::SETNE; break;
2622 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2626 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2627 (ISD::CondCode) compareOp);
2629 if ((CC->get() & 0x8) == 0) {
2630 // Ordered comparison:
2631 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2632 lhs, DAG.getConstantFP(0.0, MVT::f64),
2634 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2635 rhs, DAG.getConstantFP(0.0, MVT::f64),
2637 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2639 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2645 //! Lower ISD::SELECT_CC
2647 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2650 \note Need to revisit this in the future: if the code path through the true
2651 and false value computations is longer than the latency of a branch (6
2652 cycles), then it would be more advantageous to branch and insert a new basic
2653 block and branch on the condition. However, this code does not make that
2654 assumption, given the simplistic uses so far.
2657 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2658 const TargetLowering &TLI) {
2659 EVT VT = Op.getValueType();
2660 SDValue lhs = Op.getOperand(0);
2661 SDValue rhs = Op.getOperand(1);
2662 SDValue trueval = Op.getOperand(2);
2663 SDValue falseval = Op.getOperand(3);
2664 SDValue condition = Op.getOperand(4);
2665 DebugLoc dl = Op.getDebugLoc();
2667 // NOTE: SELB's arguments: $rA, $rB, $mask
2669 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2670 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2671 // condition was true and 0s where the condition was false. Hence, the
2672 // arguments to SELB get reversed.
2674 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2675 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2676 // with another "cannot select select_cc" assert:
2678 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2679 TLI.getSetCCResultType(Op.getValueType()),
2680 lhs, rhs, condition);
2681 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2684 //! Custom lower ISD::TRUNCATE
2685 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2687 // Type to truncate to
2688 EVT VT = Op.getValueType();
2689 MVT simpleVT = VT.getSimpleVT();
2690 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2691 VT, (128 / VT.getSizeInBits()));
2692 DebugLoc dl = Op.getDebugLoc();
2694 // Type to truncate from
2695 SDValue Op0 = Op.getOperand(0);
2696 EVT Op0VT = Op0.getValueType();
2698 if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
2699 // Create shuffle mask, least significant doubleword of quadword
2700 unsigned maskHigh = 0x08090a0b;
2701 unsigned maskLow = 0x0c0d0e0f;
2702 // Use a shuffle to perform the truncation
2703 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2704 DAG.getConstant(maskHigh, MVT::i32),
2705 DAG.getConstant(maskLow, MVT::i32),
2706 DAG.getConstant(maskHigh, MVT::i32),
2707 DAG.getConstant(maskLow, MVT::i32));
2709 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2710 Op0, Op0, shufMask);
2712 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2715 return SDValue(); // Leave the truncate unmolested
2719 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2720 * algorithm is to duplicate the sign bit using rotmai to generate at
2721 * least one byte full of sign bits. Then propagate the "sign-byte" into
2722 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2724 * @param Op The sext operand
2725 * @param DAG The current DAG
2726 * @return The SDValue with the entire instruction sequence
2728 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2730 DebugLoc dl = Op.getDebugLoc();
2732 // Type to extend to
2733 MVT OpVT = Op.getValueType().getSimpleVT();
2735 // Type to extend from
2736 SDValue Op0 = Op.getOperand(0);
2737 MVT Op0VT = Op0.getValueType().getSimpleVT();
2739 // extend i8 & i16 via i32
2740 if (Op0VT == MVT::i8 || Op0VT == MVT::i16) {
2741 Op0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Op0);
2745 // The type to extend to needs to be a i128 and
2746 // the type to extend from needs to be i64 or i32.
2747 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2748 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2751 // Create shuffle mask
2752 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2753 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2754 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2755 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2756 DAG.getConstant(mask1, MVT::i32),
2757 DAG.getConstant(mask1, MVT::i32),
2758 DAG.getConstant(mask2, MVT::i32),
2759 DAG.getConstant(mask3, MVT::i32));
2761 // Word wise arithmetic right shift to generate at least one byte
2762 // that contains sign bits.
2763 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2764 SDValue sraVal = DAG.getNode(ISD::SRA,
2767 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2768 DAG.getConstant(31, MVT::i32));
2770 // reinterpret as a i128 (SHUFB requires it). This gets lowered away.
2771 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2773 DAG.getTargetConstant(
2774 SPU::GPRCRegClass.getID(),
2776 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2777 // and the input value into the lower 64 bits.
2778 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2779 extended, sraVal, shufMask);
2780 return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
2783 //! Custom (target-specific) lowering entry point
2785 This is where LLVM's DAG selection process calls to do target-specific
2789 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2791 unsigned Opc = (unsigned) Op.getOpcode();
2792 EVT VT = Op.getValueType();
2797 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2798 errs() << "Op.getOpcode() = " << Opc << "\n";
2799 errs() << "*Op.getNode():\n";
2800 Op.getNode()->dump();
2802 llvm_unreachable(0);
2808 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2810 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2811 case ISD::ConstantPool:
2812 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2813 case ISD::GlobalAddress:
2814 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2815 case ISD::JumpTable:
2816 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2817 case ISD::ConstantFP:
2818 return LowerConstantFP(Op, DAG);
2820 // i8, i64 math ops:
2829 return LowerI8Math(Op, DAG, Opc, *this);
2833 case ISD::FP_TO_SINT:
2834 case ISD::FP_TO_UINT:
2835 return LowerFP_TO_INT(Op, DAG, *this);
2837 case ISD::SINT_TO_FP:
2838 case ISD::UINT_TO_FP:
2839 return LowerINT_TO_FP(Op, DAG, *this);
2841 // Vector-related lowering.
2842 case ISD::BUILD_VECTOR:
2843 return LowerBUILD_VECTOR(Op, DAG);
2844 case ISD::SCALAR_TO_VECTOR:
2845 return LowerSCALAR_TO_VECTOR(Op, DAG);
2846 case ISD::VECTOR_SHUFFLE:
2847 return LowerVECTOR_SHUFFLE(Op, DAG);
2848 case ISD::EXTRACT_VECTOR_ELT:
2849 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2850 case ISD::INSERT_VECTOR_ELT:
2851 return LowerINSERT_VECTOR_ELT(Op, DAG);
2853 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2857 return LowerByteImmed(Op, DAG);
2859 // Vector and i8 multiply:
2862 return LowerI8Math(Op, DAG, Opc, *this);
2865 return LowerCTPOP(Op, DAG);
2867 case ISD::SELECT_CC:
2868 return LowerSELECT_CC(Op, DAG, *this);
2871 return LowerSETCC(Op, DAG, *this);
2874 return LowerTRUNCATE(Op, DAG);
2876 case ISD::SIGN_EXTEND:
2877 return LowerSIGN_EXTEND(Op, DAG);
2883 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2884 SmallVectorImpl<SDValue>&Results,
2885 SelectionDAG &DAG) const
2888 unsigned Opc = (unsigned) N->getOpcode();
2889 EVT OpVT = N->getValueType(0);
2893 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2894 errs() << "Op.getOpcode() = " << Opc << "\n";
2895 errs() << "*Op.getNode():\n";
2903 /* Otherwise, return unchanged */
2906 //===----------------------------------------------------------------------===//
2907 // Target Optimization Hooks
2908 //===----------------------------------------------------------------------===//
// Target-specific DAG combines for SPU.  The visible cases are:
//   * ISD::ADD involving SPUISD::IndirectAddr — fold a zero addend away, or
//     merge two constant offsets into a single SPUindirect offset.
//   * {SIGN,ZERO,ANY}_EXTEND of SPUISD::VEC2PREFSLOT — drop the no-op extend
//     when the types already match.
//   * SPUISD::IndirectAddr — collapse (SPUindirect (SPUaform a, 0), 0) when
//     not using large memory, and (SPUindirect (add x, y), 0).
//   * Degenerate (shift-by-zero) SPU vector shifts/rotates.
//   * SPUISD::PREFSLOT2VEC of (extend (SPUvec2prefslot x)) or of
//     SPUISD::VEC2PREFSLOT — cancel the round trip when types match.
// Returns the replacement SDValue, or an empty SDValue to keep N unchanged.
// NOTE(review): this excerpt elides many interior lines (case labels, braces,
// some assignments) — verify details against the complete source.
2911 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2914 TargetMachine &TM = getTargetMachine();
2916 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2917 SelectionDAG &DAG = DCI.DAG;
2918 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2919 EVT NodeVT = N->getValueType(0); // The node's value type
2920 EVT Op0VT = Op0.getValueType(); // The first operand's result
2921 SDValue Result; // Initially, empty result
2922 DebugLoc dl = N->getDebugLoc();
2924 switch (N->getOpcode()) {
2927 SDValue Op1 = N->getOperand(1);
2929 if (Op0.getOpcode() == SPUISD::IndirectAddr
2930 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2931 // Normalize the operands to reduce repeated code
2932 SDValue IndirectArg = Op0, AddArg = Op1;
2934 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2939 if (isa<ConstantSDNode>(AddArg)) {
2940 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2941 SDValue IndOp1 = IndirectArg.getOperand(1);
2943 if (CN0->isNullValue()) {
2944 // (add (SPUindirect <arg>, <arg>), 0) ->
2945 // (SPUindirect <arg>, <arg>)
2947 #if !defined(NDEBUG)
2948 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2950 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2951 << "With: (SPUindirect <arg>, <arg>)\n";
2956 } else if (isa<ConstantSDNode>(IndOp1)) {
2957 // (add (SPUindirect <arg>, <const>), <const>) ->
2958 // (SPUindirect <arg>, <const + const>)
// Both offsets are constants: add them and emit one SPUindirect.
2959 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2960 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2961 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2963 #if !defined(NDEBUG)
2964 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2966 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2967 << "), " << CN0->getSExtValue() << ")\n"
2968 << "With: (SPUindirect <arg>, "
2969 << combinedConst << ")\n";
2973 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2974 IndirectArg, combinedValue);
2980 case ISD::SIGN_EXTEND:
2981 case ISD::ZERO_EXTEND:
2982 case ISD::ANY_EXTEND: {
2983 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2984 // (any_extend (SPUextract_elt0 <arg>)) ->
2985 // (SPUextract_elt0 <arg>)
2986 // Types must match, however...
2987 #if !defined(NDEBUG)
2988 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2989 errs() << "\nReplace: ";
2991 errs() << "\nWith: ";
2992 Op0.getNode()->dump(&DAG);
3001 case SPUISD::IndirectAddr: {
3002 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
3003 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
3004 if (CN != 0 && CN->isNullValue()) {
3005 // (SPUindirect (SPUaform <addr>, 0), 0) ->
3006 // (SPUaform <addr>, 0)
3008 DEBUG(errs() << "Replace: ");
3009 DEBUG(N->dump(&DAG));
3010 DEBUG(errs() << "\nWith: ");
3011 DEBUG(Op0.getNode()->dump(&DAG));
3012 DEBUG(errs() << "\n");
3016 } else if (Op0.getOpcode() == ISD::ADD) {
3017 SDValue Op1 = N->getOperand(1);
3018 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
3019 // (SPUindirect (add <arg>, <arg>), 0) ->
3020 // (SPUindirect <arg>, <arg>)
3021 if (CN1->isNullValue()) {
3023 #if !defined(NDEBUG)
3024 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
3026 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
3027 << "With: (SPUindirect <arg>, <arg>)\n";
3031 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
3032 Op0.getOperand(0), Op0.getOperand(1));
3038 case SPUISD::SHL_BITS:
3039 case SPUISD::SHL_BYTES:
3040 case SPUISD::ROTBYTES_LEFT: {
3041 SDValue Op1 = N->getOperand(1);
3043 // Kill degenerate vector shifts:
3044 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3045 if (CN->isNullValue()) {
3051 case SPUISD::PREFSLOT2VEC: {
3052 switch (Op0.getOpcode()) {
3055 case ISD::ANY_EXTEND:
3056 case ISD::ZERO_EXTEND:
3057 case ISD::SIGN_EXTEND: {
3058 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
3060 // but only if the SPUprefslot2vec and <arg> types match.
3061 SDValue Op00 = Op0.getOperand(0);
3062 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
3063 SDValue Op000 = Op00.getOperand(0);
3064 if (Op000.getValueType() == NodeVT) {
3070 case SPUISD::VEC2PREFSLOT: {
3071 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
// Round-trip through the preferred slot cancels out entirely.
3073 Result = Op0.getOperand(0);
3081 // Otherwise, return unchanged.
// If a combine above populated Result, report the replacement under -debug.
3083 if (Result.getNode()) {
3084 DEBUG(errs() << "\nReplace.SPU: ");
3085 DEBUG(N->dump(&DAG));
3086 DEBUG(errs() << "\nWith: ");
3087 DEBUG(Result.getNode()->dump(&DAG));
3088 DEBUG(errs() << "\n");
3095 //===----------------------------------------------------------------------===//
3096 // Inline Assembly Support
3097 //===----------------------------------------------------------------------===//
3099 /// getConstraintType - Given a constraint letter, return the type of
3100 /// constraint it is for this target.
3101 SPUTargetLowering::ConstraintType
3102 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
// Single-letter constraints handled here classify as register-class
// constraints (the case labels are elided in this excerpt — presumably the
// usual SPU register letters; verify against the full switch).
3103 if (ConstraintLetter.size() == 1) {
3104 switch (ConstraintLetter[0]) {
3111 return C_RegisterClass;
// Anything unrecognized defers to the generic TargetLowering handling.
3114 return TargetLowering::getConstraintType(ConstraintLetter);
3117 /// Examine constraint type and operand type and determine a weight value.
3118 /// This object must already have been set up with the operand type
3119 /// and the current alternative constraint selected.
3120 TargetLowering::ConstraintWeight
3121 SPUTargetLowering::getSingleConstraintMatchWeight(
3122 AsmOperandInfo &info, const char *constraint) const {
3123 ConstraintWeight weight = CW_Invalid;
3124 Value *CallOperandVal = info.CallOperandVal;
3125 // If we don't have a value, we can't do a match,
3126 // but allow it at the lowest weight.
3127 if (CallOperandVal == NULL)
3129 // Look at the constraint type.
3130 switch (*constraint) {
// Default case (label elided here): fall back to the generic weighting.
3132 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3134 //FIXME: Seems like the supported constraint letters were just copied
3135 // from PPC, as the following doesn't correspond to the GCC docs.
3136 // I'm leaving it so until someone adds the corresponding lowering support.
// Recognized letters (cases elided in this excerpt) rate as CW_Register.
3143 weight = CW_Register;
// Map a single-letter inline-asm constraint (plus the operand's VT) to a
// concrete SPU register class; unknown constraints defer to the base class.
// The pair's first member is 0U, meaning "any register in the class".
3149 std::pair<unsigned, const TargetRegisterClass*>
3150 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3153 if (Constraint.size() == 1) {
3154 // GCC RS6000 Constraint Letters
3155 switch (Constraint[0]) {
// Integer constraints: 64-bit vs 32-bit register classes (case labels and
// the VT test are elided in this excerpt — confirm against full source).
3159 return std::make_pair(0U, SPU::R64CRegisterClass);
3160 return std::make_pair(0U, SPU::R32CRegisterClass);
// Floating-point constraint: pick single- or double-precision class by VT.
3163 return std::make_pair(0U, SPU::R32FPRegisterClass);
3164 else if (VT == MVT::f64)
3165 return std::make_pair(0U, SPU::R64FPRegisterClass);
3168 return std::make_pair(0U, SPU::GPRCRegisterClass);
3172 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3175 //! Compute used/known bits for a SPU operand
// For the listed SPU-specific opcodes no known-bits information is derived;
// the switch body between the labels is elided in this excerpt, but the
// visible structure (commented-out KnownZero/KnownOne reset under the
// default) suggests a conservative "know nothing" answer — TODO confirm.
3177 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3181 const SelectionDAG &DAG,
3182 unsigned Depth ) const {
3184 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3186 switch (Op.getOpcode()) {
3188 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3194 case SPUISD::PREFSLOT2VEC:
3195 case SPUISD::LDRESULT:
3196 case SPUISD::VEC2PREFSLOT:
3197 case SPUISD::SHLQUAD_L_BITS:
3198 case SPUISD::SHLQUAD_L_BYTES:
3199 case SPUISD::VEC_ROTL:
3200 case SPUISD::VEC_ROTR:
3201 case SPUISD::ROTBYTES_LEFT:
3202 case SPUISD::SELECT_MASK:
// Report the number of known sign bits for SPU-specific nodes.  The visible
// case (label elided) returns the full bit width for i8/i16/i32 values —
// i.e. the value is known sign-extended; other types assert or fall through
// in the elided code.  TODO confirm which opcode this case handles.
3209 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3210 unsigned Depth) const {
3211 switch (Op.getOpcode()) {
3216 EVT VT = Op.getValueType();
3218 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3221 return VT.getSizeInBits();
3226 // LowerAsmOperandForConstraint
// SPU adds no custom handling for inline-asm operand constraints; simply
// delegate to the generic TargetLowering implementation.
3228 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3229 std::string &Constraint,
3230 std::vector<SDValue> &Ops,
3231 SelectionDAG &DAG) const {
3232 // Default, for the time being, to the base class handler
3233 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3236 /// isLegalAddressImmediate - Return true if the integer value can be used
3237 /// as the offset of the target addressing mode.
3238 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3240 // SPU's addresses are 256K:
// Accept offsets in the open-ish range (-2^18, 2^18-1), i.e. an 18-bit
// signed immediate with the top positive value excluded.
3241 return (V > -(1 << 18) && V < (1 << 18) - 1);
// GlobalValue overload of isLegalAddressImmediate.  The body is elided in
// this excerpt — presumably returns false (globals are not bare address
// immediates here); verify against the full source.
3244 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
// Whether a constant offset may be folded into a global-address node.
// Per the comment, SPU does not support offsets yet — the elided return is
// presumably false; verify against the full source.
3249 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3250 // The SPU target isn't yet aware of offsets.
3255 bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3256 //ceqi, cgti, etc. all take s10 operand
3257 return isInt<10>(Imm);
3261 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3264 // A-form: 18bit absolute address.
3265 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3268 // D-form: reg + 14bit offset
3269 if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3273 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)