1 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file implements the SPUTargetLowering class.
11 //===----------------------------------------------------------------------===//
13 #include "SPUISelLowering.h"
14 #include "SPUTargetMachine.h"
15 #include "SPUFrameLowering.h"
16 #include "SPUMachineFunction.h"
17 #include "llvm/Constants.h"
18 #include "llvm/Function.h"
19 #include "llvm/Intrinsics.h"
20 #include "llvm/CallingConv.h"
21 #include "llvm/Type.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/ADT/VectorExtras.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Support/raw_ostream.h"
// Used in getTargetNodeName() below
// Maps SPUISD opcodes to printable names; populated lazily on the first
// call to getTargetNodeName().
// NOTE(review): mutable file-scope state written from a const member
// function below -- not thread-safe; confirm DAG printing stays
// single-threaded.
std::map<unsigned, const char *> node_names;
// Byte offset of the preferred slot (counted from the MSB)
// SPU keeps a scalar in a fixed "preferred slot" of the 128-bit register:
// i1/i8 sit at byte 3 and i16 at byte 2 from the MSB.  Other types are
// presumably handled by a default offset set in lines not visible here --
// confirm against the full function body.
int prefslotOffset(EVT VT) {
  if (VT==MVT::i1) retval=3;
  if (VT==MVT::i8) retval=3;
  if (VT==MVT::i16) retval=2;
//! Expand a library call into an actual call DAG node
 This code is taken from SelectionDAGLegalize, since it is not exposed as
 part of the LLVM SelectionDAG API.
ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
              bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
  // NOTE(review): the Hi out-parameter is never written in the code shown
  // here -- confirm it is assigned in elided lines or is simply unused.
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  // Translate each operand of Op into an ArgListEntry, tagging it with
  // the requested sign/zero extension.
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  // The callee is the runtime-library symbol registered for this libcall.
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
  // Splice the libcall in wherever FindInputOutputChains tells us to.
  Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG, Op.getDebugLoc());
  // CallInfo.first is the call's result value; .second (the chain) is
  // dropped here.
  return CallInfo.first;
/// SPUTargetLowering constructor: registers the SPU register classes,
/// declares how each DAG operation is legalized for each value type
/// (Legal / Custom / Expand / Promote), and configures target-wide
/// parameters (boolean contents, stack pointer, DAG combines, scheduling).
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);
  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  // No extending FP loads either:
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  // i128 cannot be truncate-stored directly to narrower integer types:
  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
  // SPU's loads and stores have to be custom lowered:
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setLoadExtAction(ISD::EXTLOAD, VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);
    // Truncating stores to every narrower integer type must be expanded:
    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
  // Same treatment for the floating-point types:
  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i128, Expand);
  setOperationAction(ISD::UREM, MVT::i128, Expand);
  setOperationAction(ISD::SDIV, MVT::i128, Expand);
  setOperationAction(ISD::UDIV, MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.
  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);
  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);
  // Custom lower i8 multiplication; i32 and i64 multiplies are legal
  // (handled during instruction selection).
  setOperationAction(ISD::MUL, MVT::i8, Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);
  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD, MVT::i8, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);
  // SPU does not have BSWAP. It does have i32 support CTLZ.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Expand);
  setOperationAction(ISD::CTTZ , MVT::i8, Expand);
  setOperationAction(ISD::CTTZ , MVT::i16, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i128, Expand);
  setOperationAction(ISD::CTLZ , MVT::i8, Promote);
  setOperationAction(ISD::CTLZ , MVT::i16, Promote);
  setOperationAction(ISD::CTLZ , MVT::i32, Legal);
  setOperationAction(ISD::CTLZ , MVT::i64, Expand);
  setOperationAction(ISD::CTLZ , MVT::i128, Expand);
  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8, Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::i8, Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);
  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  // NOTE(review): FP_TO_SINT/i64 is set to Expand here but re-set to Custom
  // further down ("Cell SPU has instructions for converting between i64 and
  // fp"), and FP_TO_UINT/i32 above is later changed to Promote.  The last
  // setOperationAction call wins -- confirm the earlier settings are
  // intentionally dead.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall
  // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  // Re-interpreting bits between same-sized int/fp types is free:
  setOperationAction(ISD::BITCAST, MVT::i32, Legal);
  setOperationAction(ISD::BITCAST, MVT::f32, Legal);
  setOperationAction(ISD::BITCAST, MVT::i64, Legal);
  setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  // Use the default implementation.
  setOperationAction(ISD::VAARG , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);
  // Cell SPU has instructions for converting between i64 and fp.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  // (overrides the earlier Custom setting for FP_TO_UINT/i32).
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul is set Legal for vector types (an older comment here claimed it
    // had to be custom lowered -- the action below is what is in effect).
    setOperationAction(ISD::MUL, VT, Legal);
    setOperationAction(ISD::AND, VT, Legal);
    setOperationAction(ISD::OR, VT, Legal);
    setOperationAction(ISD::XOR, VT, Legal);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE, VT, Custom);
    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  // v16i8 logical ops and v4f32 scalar_to_vector get special handling:
  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR, MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // FIXME: Is this correct?
  setStackPointerRegisterToSaveRestore(SPU::R1);
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  // Functions are aligned on 2^3 = 8-byte boundaries.
  setMinFunctionAlignment(3);
  computeRegisterProperties();
  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
  // Lazily populate the file-scope opcode -> name table on first use.
  // NOTE(review): writes file-scope state from a const method; not
  // thread-safe -- acceptable only while DAG printing is single-threaded.
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
    node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
            "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
  // Return the name for a known opcode, or a null pointer otherwise.
  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
  return ((i != node_names.end()) ? i->second : 0);
//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//
EVT SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  MVT::SimpleValueType retval;
  // Choose an integer result type matching the width of the compared type.
  switch(VT.getSimpleVT().SimpleTy){
    retval = MVT::i8; break;
    retval = MVT::i16; break;
519 //===----------------------------------------------------------------------===//
520 // Calling convention code:
521 //===----------------------------------------------------------------------===//
523 #include "SPUGenCallingConv.inc"
525 //===----------------------------------------------------------------------===//
526 // LowerOperation implementation
527 //===----------------------------------------------------------------------===//
/// Custom lower loads for CellSPU
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to rotate to extract the requested element.
 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:
 %2  v16i8,ch = rotate %1
 %3  v4f32, ch = bitconvert %2
 %4  f32 = vec2prefslot %3
 %5  f64 = fp_extend %4
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();
  EVT OutVT = Op.getValueType();
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  // Byte offset of InVT's preferred slot within a 16-byte register.
  int pso = prefslotOffset(InVT);
  DebugLoc dl = Op.getDebugLoc();
  // Scalars are loaded as a full 128-bit vector of the element type.
  EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
                                                      (128 / InVT.getSizeInBits()));
  assert( LN->getAddressingMode() == ISD::UNINDEXED
          && "we should get only UNINDEXED adresses");
  // clean aligned loads can be selected as-is
  if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)
  // Get pointerinfos to the memory chunk(s) that contain the data to load
  uint64_t mpi_offset = LN->getPointerInfo().Offset;
  mpi_offset -= mpi_offset%16;
  MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
  MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);
  SDValue basePtr = LN->getBasePtr();
  if ((alignment%16) == 0) {
    // Special cases for a known aligned load to simplify the base pointer
    // and the rotation amount:
    if (basePtr.getOpcode() == ISD::ADD
        && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
      // Known offset into basePtr
      int64_t offset = CN->getSExtValue();
      // Rotate by the sub-quadword offset, adjusted for the preferred slot.
      int64_t rotamt = int64_t((offset & 0xf) - pso);
      rotate = DAG.getConstant(rotamt, MVT::i16);
      // Simplify the base pointer for this case:
      basePtr = basePtr.getOperand(0);
      if ((offset & ~0xf) > 0) {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              DAG.getConstant((offset & ~0xf), PtrVT));
    } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
               || (basePtr.getOpcode() == SPUISD::IndirectAddr
                   && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                   && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
      // Plain aligned a-form address: rotate into preferred slot
      // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
      int64_t rotamt = -pso;
      rotate = DAG.getConstant(rotamt, MVT::i16);
    // Offset the rotate amount by the basePtr and the preferred slot
      int64_t rotamt = -pso;
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getConstant(rotamt, PtrVT));
    // Unaligned load: must be more pessimistic about addressing modes:
    if (basePtr.getOpcode() == ISD::ADD) {
      MachineFunction &MF = DAG.getMachineFunction();
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
      SDValue Op0 = basePtr.getOperand(0);
      SDValue Op1 = basePtr.getOperand(1);
      if (isa<ConstantSDNode>(Op1)) {
        // Convert the (add <ptr>, <const>) to an indirect address contained
        // in a register. Note that this is done because we need to avoid
        // creating a 0(reg) d-form address due to the SPU's block loads.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
        basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        // Convert the (add <arg1>, <arg2>) to an indirect address, which
        // will likely be lowered as a reg(reg) x-form address.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
      basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                            DAG.getConstant(0, PtrVT));
    // Offset the rotate amount by the basePtr and the preferred slot
    rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getConstant(-pso, PtrVT));
  // Do the load as a i128 to allow possible shifting
  SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
                            LN->isVolatile(), LN->isNonTemporal(), 16);
  // When the size is not greater than alignment we get all data with just
  if (alignment >= InVT.getSizeInBits()/8) {
    the_chain = low.getValue(1);
    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
                         low.getValue(0), rotate);
    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BITCAST, dl, vecVT, result));
  // When alignment is less than the size, we might need (known only at
  // run-time) two loads
  // TODO: if the memory address is composed only from constants, we have
  // extra knowledge, and might avoid the second load
  // storage position offset from lower 16 byte aligned memory chunk
  SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
                               basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
  // get a registerfull of ones. (this implementation is a workaround: LLVM
  // cannot handle 128 bit signed int constants)
  SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
  ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
  // Second load: the next 16-byte chunk, in case the value straddles the
  // boundary.
  SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
                             DAG.getNode(ISD::ADD, dl, PtrVT,
                                         DAG.getConstant(16, PtrVT)),
                             LN->isVolatile(), LN->isNonTemporal(), 16);
  the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
  // Shift the (possible) high part right to compensate the misalignment.
  // if there is no highpart (i.e. value is i64 and offset is 4), this
  // will zero out the high value.
  high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
                     DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant( 16, MVT::i32),
  // Shift the low similarly
  // TODO: add SPUISD::SHL_BYTES
  low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
  // Merge the two parts
  result = DAG.getNode(ISD::BITCAST, dl, vecVT,
                       DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
  if (!InVT.isVector()) {
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );
  // Handle extending loads by extending the scalar result:
  if (ExtType == ISD::SEXTLOAD) {
    result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
  } else if (ExtType == ISD::ZEXTLOAD) {
    result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
  } else if (ExtType == ISD::EXTLOAD) {
    unsigned NewOpc = ISD::ANY_EXTEND;
    if (OutVT.isFloatingPoint())
      NewOpc = ISD::FP_EXTEND;
    result = DAG.getNode(NewOpc, dl, OutVT, result);
  // Package the value and the output chain as an LDRESULT node.
  SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
  SDValue retops[2] = {
  result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                       retops, sizeof(retops) / sizeof(retops[0]));
747 /// Custom lower stores for CellSPU
749 All CellSPU stores are aligned to 16-byte boundaries, so for elements
750 within a 16-byte block, we have to generate a shuffle to insert the
751 requested element into its place, then store the resulting block.
754 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
755 StoreSDNode *SN = cast<StoreSDNode>(Op);
756 SDValue Value = SN->getValue();
757 EVT VT = Value.getValueType();
758 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
759 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
760 DebugLoc dl = Op.getDebugLoc();
761 unsigned alignment = SN->getAlignment();
763 EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
764 (128 / StVT.getSizeInBits()));
765 // Get pointerinfos to the memory chunk(s) that contain the data to load
766 uint64_t mpi_offset = SN->getPointerInfo().Offset;
767 mpi_offset -= mpi_offset%16;
768 MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
769 MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);
773 assert( SN->getAddressingMode() == ISD::UNINDEXED
774 && "we should get only UNINDEXED adresses");
775 // clean aligned loads can be selected as-is
776 if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
779 SDValue alignLoadVec;
780 SDValue basePtr = SN->getBasePtr();
781 SDValue the_chain = SN->getChain();
782 SDValue insertEltOffs;
784 if ((alignment%16) == 0) {
786 // Special cases for a known aligned load to simplify the base pointer
787 // and insertion byte:
788 if (basePtr.getOpcode() == ISD::ADD
789 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
790 // Known offset into basePtr
791 int64_t offset = CN->getSExtValue();
793 // Simplify the base pointer for this case:
794 basePtr = basePtr.getOperand(0);
795 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
797 DAG.getConstant((offset & 0xf), PtrVT));
799 if ((offset & ~0xf) > 0) {
800 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
802 DAG.getConstant((offset & ~0xf), PtrVT));
805 // Otherwise, assume it's at byte 0 of basePtr
806 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
808 DAG.getConstant(0, PtrVT));
809 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
811 DAG.getConstant(0, PtrVT));
814 // Unaligned load: must be more pessimistic about addressing modes:
815 if (basePtr.getOpcode() == ISD::ADD) {
816 MachineFunction &MF = DAG.getMachineFunction();
817 MachineRegisterInfo &RegInfo = MF.getRegInfo();
818 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
821 SDValue Op0 = basePtr.getOperand(0);
822 SDValue Op1 = basePtr.getOperand(1);
824 if (isa<ConstantSDNode>(Op1)) {
825 // Convert the (add <ptr>, <const>) to an indirect address contained
826 // in a register. Note that this is done because we need to avoid
827 // creating a 0(reg) d-form address due to the SPU's block loads.
828 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
829 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
830 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
832 // Convert the (add <arg1>, <arg2>) to an indirect address, which
833 // will likely be lowered as a reg(reg) x-form address.
834 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
837 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
839 DAG.getConstant(0, PtrVT));
842 // Insertion point is solely determined by basePtr's contents
843 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
845 DAG.getConstant(0, PtrVT));
848 // Load the lower part of the memory to which to store.
849 SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
850 lowMemPtr, SN->isVolatile(), SN->isNonTemporal(), 16);
852 // if we don't need to store over the 16 byte boundary, one store suffices
853 if (alignment >= StVT.getSizeInBits()/8) {
855 the_chain = low.getValue(1);
857 LoadSDNode *LN = cast<LoadSDNode>(low);
858 SDValue theValue = SN->getValue();
861 && (theValue.getOpcode() == ISD::AssertZext
862 || theValue.getOpcode() == ISD::AssertSext)) {
863 // Drill down and get the value for zero- and sign-extended
865 theValue = theValue.getOperand(0);
868 // If the base pointer is already a D-form address, then just create
869 // a new D-form address with a slot offset and the original base pointer.
870 // Otherwise generate a D-form address with the slot offset relative
871 // to the stack pointer, which is always aligned.
873 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
874 errs() << "CellSPU LowerSTORE: basePtr = ";
875 basePtr.getNode()->dump(&DAG);
880 SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
882 SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
885 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
887 DAG.getNode(ISD::BITCAST, dl,
888 MVT::v4i32, insertEltOp));
890 result = DAG.getStore(the_chain, dl, result, basePtr,
892 LN->isVolatile(), LN->isNonTemporal(),
896 // do the store when it might cross the 16 byte memory access boundary.
898 // TODO issue a warning if SN->isVolatile()== true? This is likely not
899 // what the user wanted.
901 // address offset from the nearest lower 16-byte aligned address
902 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
904 DAG.getConstant(0xf, MVT::i32));
906 SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
907 DAG.getConstant( 16, MVT::i32),
909 // 16 - sizeof(Value)
910 SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
911 DAG.getConstant( 16, MVT::i32),
912 DAG.getConstant( VT.getSizeInBits()/8,
914 // get a register full of ones
915 SDValue ones = DAG.getConstant(-1, MVT::v4i32);
916 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
918 // Create the 128 bit masks that have ones where the data to store is
920 SDValue lowmask, himask;
921 // if the value to store doesn't fill up an entire 128 bits, zero
922 // out the last bits of the mask so that only the value we want to store
924 // this is e.g. in the case of store i32, align 2
926 Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
927 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
928 lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
930 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
931 Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
936 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
938 // this will zero, if there are no data that goes to the high quad
939 himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
941 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
944 // Load in the old data and zero out the parts that will be overwritten with
945 // the new data to store.
946 SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
947 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
948 DAG.getConstant( 16, PtrVT)),
950 SN->isVolatile(), SN->isNonTemporal(), 16);
951 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
954 low = DAG.getNode(ISD::AND, dl, MVT::i128,
955 DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
956 DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
957 hi = DAG.getNode(ISD::AND, dl, MVT::i128,
958 DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
959 DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
961 // Shift the Value to store into place. rlow contains the parts that go to
962 // the lower memory chunk, rhi has the parts that go to the upper one.
963 SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
964 rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
965 SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
968 // Merge the old data and the new data and store the results
969 // Need to convert vectors here to integer as 'OR'ing floats assert
970 rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
971 DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
972 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
973 rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
974 DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
975 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
977 low = DAG.getStore(the_chain, dl, rlow, basePtr,
979 SN->isVolatile(), SN->isNonTemporal(), 16);
980 hi = DAG.getStore(the_chain, dl, rhi,
981 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
982 DAG.getConstant( 16, PtrVT)),
984 SN->isVolatile(), SN->isNonTemporal(), 16);
985 result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
992 //! Generate the address of a constant pool entry.
994 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
995 EVT PtrVT = Op.getValueType();
996 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
997 const Constant *C = CP->getConstVal();
998 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
999 SDValue Zero = DAG.getConstant(0, PtrVT);
1000 const TargetMachine &TM = DAG.getTarget();
1001 // FIXME there is no actual debug info here
1002 DebugLoc dl = Op.getDebugLoc();
1004 if (TM.getRelocationModel() == Reloc::Static) {
1005 if (!ST->usingLargeMem()) {
1006 // Just return the SDValue with the constant pool address in it.
1007 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
1009 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
1010 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
1011 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1015 llvm_unreachable("LowerConstantPool: Relocation model other than static"
1020 //! Alternate entry point for generating the address of a constant pool entry
1022 SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
1023 return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
1027 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1028 EVT PtrVT = Op.getValueType();
1029 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1030 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1031 SDValue Zero = DAG.getConstant(0, PtrVT);
1032 const TargetMachine &TM = DAG.getTarget();
1033 // FIXME there is no actual debug info here
1034 DebugLoc dl = Op.getDebugLoc();
1036 if (TM.getRelocationModel() == Reloc::Static) {
1037 if (!ST->usingLargeMem()) {
1038 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
1040 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
1041 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
1042 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1046 llvm_unreachable("LowerJumpTable: Relocation model other than static"
1052 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1053 EVT PtrVT = Op.getValueType();
1054 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
1055 const GlobalValue *GV = GSDN->getGlobal();
1056 SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
1057 PtrVT, GSDN->getOffset());
1058 const TargetMachine &TM = DAG.getTarget();
1059 SDValue Zero = DAG.getConstant(0, PtrVT);
1060 // FIXME there is no actual debug info here
1061 DebugLoc dl = Op.getDebugLoc();
1063 if (TM.getRelocationModel() == Reloc::Static) {
1064 if (!ST->usingLargeMem()) {
1065 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
1067 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
1068 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
1069 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1072 report_fatal_error("LowerGlobalAddress: Relocation model other than static"
1080 //! Custom lower double precision floating point constants
1082 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
1083 EVT VT = Op.getValueType();
1084 // FIXME there is no actual debug info here
1085 DebugLoc dl = Op.getDebugLoc();
1087 if (VT == MVT::f64) {
1088 ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
1091 "LowerConstantFP: Node is not ConstantFPSDNode");
1093 uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
1094 SDValue T = DAG.getConstant(dbits, MVT::i64);
1095 SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
1096 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1097 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
1104 SPUTargetLowering::LowerFormalArguments(SDValue Chain,
1105 CallingConv::ID CallConv, bool isVarArg,
1106 const SmallVectorImpl<ISD::InputArg>
1108 DebugLoc dl, SelectionDAG &DAG,
1109 SmallVectorImpl<SDValue> &InVals)
1112 MachineFunction &MF = DAG.getMachineFunction();
1113 MachineFrameInfo *MFI = MF.getFrameInfo();
1114 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1115 SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
1117 unsigned ArgOffset = SPUFrameLowering::minStackSize();
1118 unsigned ArgRegIdx = 0;
1119 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1121 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1123 SmallVector<CCValAssign, 16> ArgLocs;
1124 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1125 getTargetMachine(), ArgLocs, *DAG.getContext());
1126 // FIXME: allow for other calling conventions
1127 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
1129 // Add DAG nodes to load the arguments or copy them out of registers.
1130 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
1131 EVT ObjectVT = Ins[ArgNo].VT;
1132 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1134 CCValAssign &VA = ArgLocs[ArgNo];
1136 if (VA.isRegLoc()) {
1137 const TargetRegisterClass *ArgRegClass;
1139 switch (ObjectVT.getSimpleVT().SimpleTy) {
1141 report_fatal_error("LowerFormalArguments Unhandled argument type: " +
1142 Twine(ObjectVT.getEVTString()));
1144 ArgRegClass = &SPU::R8CRegClass;
1147 ArgRegClass = &SPU::R16CRegClass;
1150 ArgRegClass = &SPU::R32CRegClass;
1153 ArgRegClass = &SPU::R64CRegClass;
1156 ArgRegClass = &SPU::GPRCRegClass;
1159 ArgRegClass = &SPU::R32FPRegClass;
1162 ArgRegClass = &SPU::R64FPRegClass;
1170 ArgRegClass = &SPU::VECREGRegClass;
1174 unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
1175 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1176 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
1179 // We need to load the argument to a virtual register if we determined
1180 // above that we ran out of physical registers of the appropriate type
1181 // or we're forced to do vararg
1182 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
1183 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1184 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
1186 ArgOffset += StackSlotSize;
1189 InVals.push_back(ArgVal);
1191 Chain = ArgVal.getOperand(0);
1196 // FIXME: we should be able to query the argument registers from
1197 // tablegen generated code.
1198 static const unsigned ArgRegs[] = {
1199 SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
1200 SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
1201 SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
1202 SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
1203 SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
1204 SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
1205 SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
1206 SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
1207 SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
1208 SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
1209 SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
1211 // size of ArgRegs array
1212 unsigned NumArgRegs = 77;
1214 // We will spill (79-3)+1 registers to the stack
1215 SmallVector<SDValue, 79-3+1> MemOps;
1217 // Create the frame slot
1218 for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
1219 FuncInfo->setVarArgsFrameIndex(
1220 MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
1221 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1222 unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::VECREGRegClass);
1223 SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
1224 SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
1226 Chain = Store.getOperand(0);
1227 MemOps.push_back(Store);
1229 // Increment address by stack slot size for the next stored argument
1230 ArgOffset += StackSlotSize;
1232 if (!MemOps.empty())
1233 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1234 &MemOps[0], MemOps.size());
1240 /// isLSAAddress - Return the immediate to use if the specified
1241 /// value is representable as a LSA address.
1242 static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
1243 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1246 int Addr = C->getZExtValue();
1247 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1248 (Addr << 14 >> 14) != Addr)
1249 return 0; // Top 14 bits have to be sext of immediate.
1251 return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
1255 SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1256 CallingConv::ID CallConv, bool isVarArg,
1258 const SmallVectorImpl<ISD::OutputArg> &Outs,
1259 const SmallVectorImpl<SDValue> &OutVals,
1260 const SmallVectorImpl<ISD::InputArg> &Ins,
1261 DebugLoc dl, SelectionDAG &DAG,
1262 SmallVectorImpl<SDValue> &InVals) const {
1263 // CellSPU target does not yet support tail call optimization.
1266 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
1267 unsigned NumOps = Outs.size();
1268 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1270 SmallVector<CCValAssign, 16> ArgLocs;
1271 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1272 getTargetMachine(), ArgLocs, *DAG.getContext());
1273 // FIXME: allow for other calling conventions
1274 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
1276 const unsigned NumArgRegs = ArgLocs.size();
1279 // Handy pointer type
1280 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1282 // Set up a copy of the stack pointer for use loading and storing any
1283 // arguments that may not fit in the registers available for argument
1285 SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
1287 // Figure out which arguments are going to go in registers, and which in
1289 unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
1290 unsigned ArgRegIdx = 0;
1292 // Keep track of registers passing arguments
1293 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
1294 // And the arguments passed on the stack
1295 SmallVector<SDValue, 8> MemOpChains;
1297 for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
1298 SDValue Arg = OutVals[ArgRegIdx];
1299 CCValAssign &VA = ArgLocs[ArgRegIdx];
1301 // PtrOff will be used to store the current argument to the stack if a
1302 // register cannot be found for it.
1303 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1304 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
1306 switch (Arg.getValueType().getSimpleVT().SimpleTy) {
1307 default: llvm_unreachable("Unexpected ValueType for argument!");
1321 if (ArgRegIdx != NumArgRegs) {
1322 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1324 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
1325 MachinePointerInfo(),
1327 ArgOffset += StackSlotSize;
1333 // Accumulate how many bytes are to be pushed on the stack, including the
1334 // linkage area, and parameter passing area. According to the SPU ABI,
1335 // we minimally need space for [LR] and [SP].
1336 unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();
1338 // Insert a call sequence start
1339 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
1342 if (!MemOpChains.empty()) {
1343 // Adjust the stack pointer for the stack arguments.
1344 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1345 &MemOpChains[0], MemOpChains.size());
1348 // Build a sequence of copy-to-reg nodes chained together with token chain
1349 // and flag operands which copy the outgoing args into the appropriate regs.
1351 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1352 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1353 RegsToPass[i].second, InFlag);
1354 InFlag = Chain.getValue(1);
1357 SmallVector<SDValue, 8> Ops;
1358 unsigned CallOpc = SPUISD::CALL;
1360 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1361 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1362 // node so that legalize doesn't hack it.
1363 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1364 const GlobalValue *GV = G->getGlobal();
1365 EVT CalleeVT = Callee.getValueType();
1366 SDValue Zero = DAG.getConstant(0, PtrVT);
1367 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
1369 if (!ST->usingLargeMem()) {
1370 // Turn calls to targets that are defined (i.e., have bodies) into BRSL
1371 // style calls, otherwise, external symbols are BRASL calls. This assumes
1372 // that declared/defined symbols are in the same compilation unit and can
1373 // be reached through PC-relative jumps.
1376 // This may be an unsafe assumption for JIT and really large compilation
1378 if (GV->isDeclaration()) {
1379 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
1381 Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
1384 // "Large memory" mode: Turn all calls into indirect calls with a X-form
1386 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
1388 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1389 EVT CalleeVT = Callee.getValueType();
1390 SDValue Zero = DAG.getConstant(0, PtrVT);
1391 SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
1392 Callee.getValueType());
1394 if (!ST->usingLargeMem()) {
1395 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
1397 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
1399 } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
1400 // If this is an absolute destination address that appears to be a legal
1401 // local store address, use the munged value.
1402 Callee = SDValue(Dest, 0);
1405 Ops.push_back(Chain);
1406 Ops.push_back(Callee);
1408 // Add argument registers to the end of the list so that they are known live
1410 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1411 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1412 RegsToPass[i].second.getValueType()));
1414 if (InFlag.getNode())
1415 Ops.push_back(InFlag);
1416 // Returns a chain and a flag for retval copy to use.
1417 Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
1418 &Ops[0], Ops.size());
1419 InFlag = Chain.getValue(1);
1421 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
1422 DAG.getIntPtrConstant(0, true), InFlag);
1424 InFlag = Chain.getValue(1);
1426 // If the function returns void, just return the chain.
1430 // Now handle the return value(s)
1431 SmallVector<CCValAssign, 16> RVLocs;
1432 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1433 getTargetMachine(), RVLocs, *DAG.getContext());
1434 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
1437 // If the call has results, copy the values out of the ret val registers.
1438 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1439 CCValAssign VA = RVLocs[i];
1441 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1443 Chain = Val.getValue(1);
1444 InFlag = Val.getValue(2);
1445 InVals.push_back(Val);
1452 SPUTargetLowering::LowerReturn(SDValue Chain,
1453 CallingConv::ID CallConv, bool isVarArg,
1454 const SmallVectorImpl<ISD::OutputArg> &Outs,
1455 const SmallVectorImpl<SDValue> &OutVals,
1456 DebugLoc dl, SelectionDAG &DAG) const {
1458 SmallVector<CCValAssign, 16> RVLocs;
1459 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1460 getTargetMachine(), RVLocs, *DAG.getContext());
1461 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
1463 // If this is the first return lowered for this function, add the regs to the
1464 // liveout set for the function.
1465 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1466 for (unsigned i = 0; i != RVLocs.size(); ++i)
1467 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1472 // Copy the result values into the output registers.
1473 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1474 CCValAssign &VA = RVLocs[i];
1475 assert(VA.isRegLoc() && "Can only return in registers!");
1476 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1478 Flag = Chain.getValue(1);
1482 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1484 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1488 //===----------------------------------------------------------------------===//
1489 // Vector related lowering:
1490 //===----------------------------------------------------------------------===//
1492 static ConstantSDNode *
1493 getVecImm(SDNode *N) {
1494 SDValue OpVal(0, 0);
1496 // Check to see if this buildvec has a single non-undef value in its elements.
1497 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1498 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1499 if (OpVal.getNode() == 0)
1500 OpVal = N->getOperand(i);
1501 else if (OpVal != N->getOperand(i))
1505 if (OpVal.getNode() != 0) {
1506 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1514 /// get_vec_i18imm - Test if this vector is a vector filled with the same value
1515 /// and the value fits into an unsigned 18-bit constant, and if so, return the
1517 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
1519 if (ConstantSDNode *CN = getVecImm(N)) {
1520 uint64_t Value = CN->getZExtValue();
1521 if (ValueType == MVT::i64) {
1522 uint64_t UValue = CN->getZExtValue();
1523 uint32_t upper = uint32_t(UValue >> 32);
1524 uint32_t lower = uint32_t(UValue);
1527 Value = Value >> 32;
1529 if (Value <= 0x3ffff)
1530 return DAG.getTargetConstant(Value, ValueType);
1536 /// get_vec_i16imm - Test if this vector is a vector filled with the same value
1537 /// and the value fits into a signed 16-bit constant, and if so, return the
1539 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
1541 if (ConstantSDNode *CN = getVecImm(N)) {
1542 int64_t Value = CN->getSExtValue();
1543 if (ValueType == MVT::i64) {
1544 uint64_t UValue = CN->getZExtValue();
1545 uint32_t upper = uint32_t(UValue >> 32);
1546 uint32_t lower = uint32_t(UValue);
1549 Value = Value >> 32;
1551 if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
1552 return DAG.getTargetConstant(Value, ValueType);
1559 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
1560 /// and the value fits into a signed 10-bit constant, and if so, return the
1562 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1564 if (ConstantSDNode *CN = getVecImm(N)) {
1565 int64_t Value = CN->getSExtValue();
1566 if (ValueType == MVT::i64) {
1567 uint64_t UValue = CN->getZExtValue();
1568 uint32_t upper = uint32_t(UValue >> 32);
1569 uint32_t lower = uint32_t(UValue);
1572 Value = Value >> 32;
1574 if (isInt<10>(Value))
1575 return DAG.getTargetConstant(Value, ValueType);
1581 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
1582 /// and the value fits into a signed 8-bit constant, and if so, return the
1585 /// @note: The incoming vector is v16i8 because that's the only way we can load
1586 /// constant vectors. Thus, we test to see if the upper and lower bytes are the
1588 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1590 if (ConstantSDNode *CN = getVecImm(N)) {
1591 int Value = (int) CN->getZExtValue();
1592 if (ValueType == MVT::i16
1593 && Value <= 0xffff /* truncated from uint64_t */
1594 && ((short) Value >> 8) == ((short) Value & 0xff))
1595 return DAG.getTargetConstant(Value & 0xff, ValueType);
1596 else if (ValueType == MVT::i8
1597 && (Value & 0xff) == Value)
1598 return DAG.getTargetConstant(Value, ValueType);
1604 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1605 /// and the value fits into a signed 16-bit constant, and if so, return the
1607 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1609 if (ConstantSDNode *CN = getVecImm(N)) {
1610 uint64_t Value = CN->getZExtValue();
1611 if ((ValueType == MVT::i32
1612 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1613 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1614 return DAG.getTargetConstant(Value >> 16, ValueType);
1620 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
1621 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1622 if (ConstantSDNode *CN = getVecImm(N)) {
1623 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1629 /// get_v4i32_imm - Catch-all for general 64-bit constant vectors
1630 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1631 if (ConstantSDNode *CN = getVecImm(N)) {
1632 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
1638 //! Lower a BUILD_VECTOR instruction creatively:
1640 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1641 EVT VT = Op.getValueType();
1642 EVT EltVT = VT.getVectorElementType();
1643 DebugLoc dl = Op.getDebugLoc();
1644 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1645 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1646 unsigned minSplatBits = EltVT.getSizeInBits();
1648 if (minSplatBits < 16)
1651 APInt APSplatBits, APSplatUndef;
1652 unsigned SplatBitSize;
1655 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1656 HasAnyUndefs, minSplatBits)
1657 || minSplatBits < SplatBitSize)
1658 return SDValue(); // Wasn't a constant vector or splat exceeded min
1660 uint64_t SplatBits = APSplatBits.getZExtValue();
1662 switch (VT.getSimpleVT().SimpleTy) {
1664 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1665 Twine(VT.getEVTString()));
1668 uint32_t Value32 = uint32_t(SplatBits);
1669 assert(SplatBitSize == 32
1670 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1671 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1672 SDValue T = DAG.getConstant(Value32, MVT::i32);
1673 return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
1674 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
1678 uint64_t f64val = uint64_t(SplatBits);
1679 assert(SplatBitSize == 64
1680 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1681 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1682 SDValue T = DAG.getConstant(f64val, MVT::i64);
1683 return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
1684 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
1688 // 8-bit constants have to be expanded to 16-bits
1689 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1690 SmallVector<SDValue, 8> Ops;
1692 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1693 return DAG.getNode(ISD::BITCAST, dl, VT,
1694 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
1697 unsigned short Value16 = SplatBits;
1698 SDValue T = DAG.getConstant(Value16, EltVT);
1699 SmallVector<SDValue, 8> Ops;
1702 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
1705 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1706 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
1709 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
1719 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1721 uint32_t upper = uint32_t(SplatVal >> 32);
1722 uint32_t lower = uint32_t(SplatVal);
1724 if (upper == lower) {
1725 // Magic constant that can be matched by IL, ILA, et. al.
1726 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1727 return DAG.getNode(ISD::BITCAST, dl, OpVT,
1728 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1729 Val, Val, Val, Val));
1731 bool upper_special, lower_special;
1733 // NOTE: This code creates common-case shuffle masks that can be easily
1734 // detected as common expressions. It is not attempting to create highly
1735 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
1737 // Detect if the upper or lower half is a special shuffle mask pattern:
1738 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1739 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
1741 // Both upper and lower are special, lower to a constant pool load:
1742 if (lower_special && upper_special) {
1743 SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
1744 return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
1745 SplatValCN, SplatValCN);
1750 SmallVector<SDValue, 16> ShufBytes;
1753 // Create lower vector if not a special pattern
1754 if (!lower_special) {
1755 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1756 LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1757 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1758 LO32C, LO32C, LO32C, LO32C));
1761 // Create upper vector if not a special pattern
1762 if (!upper_special) {
1763 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1764 HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1765 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1766 HI32C, HI32C, HI32C, HI32C));
1769 // If either upper or lower are special, then the two input operands are
1770 // the same (basically, one of them is a "don't care")
1776 for (int i = 0; i < 4; ++i) {
1778 for (int j = 0; j < 4; ++j) {
1780 bool process_upper, process_lower;
1782 process_upper = (upper_special && (i & 1) == 0);
1783 process_lower = (lower_special && (i & 1) == 1);
1785 if (process_upper || process_lower) {
1786 if ((process_upper && upper == 0)
1787 || (process_lower && lower == 0))
1789 else if ((process_upper && upper == 0xffffffff)
1790 || (process_lower && lower == 0xffffffff))
1792 else if ((process_upper && upper == 0x80000000)
1793 || (process_lower && lower == 0x80000000))
1794 val |= (j == 0 ? 0xe0 : 0x80);
1796 val |= i * 4 + j + ((i & 1) * 16);
1799 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1802 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1803 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1804 &ShufBytes[0], ShufBytes.size()));
1808 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1809 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1810 /// permutation vector, V3, is monotonically increasing with one "exception"
1811 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1812 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1813 /// In either case, the net result is going to eventually invoke SHUFB to
1814 /// permute/shuffle the bytes from V1 and V2.
1816 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, generate
1817 /// control word for byte/halfword/word insertion. This takes care of a single
1818 /// element move from V2 into V1.
1820 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
1821 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1822 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1823 SDValue V1 = Op.getOperand(0);
1824 SDValue V2 = Op.getOperand(1);
1825 DebugLoc dl = Op.getDebugLoc();
// An entirely-undef second operand contributes no defined bytes, so treat it
// as V1; the mask then only ever references real data.
1827 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1829 // If we have a single element being moved from V1 to V2, this can be handled
1830 // using the C*[DX] compute mask instructions, but the vector elements have
1831 // to be monotonically increasing with one exception element, and the source
1832 // slot of the element to move must be the same as the destination.
1833 EVT VecVT = V1.getValueType();
1834 EVT EltVT = VecVT.getVectorElementType();
1835 unsigned EltsFromV2 = 0;
1836 unsigned V2EltOffset = 0;
1837 unsigned V2EltIdx0 = 0;
1838 unsigned CurrElt = 0;
1839 unsigned MaxElts = VecVT.getVectorNumElements();
1840 unsigned PrevElt = 0;
1841 bool monotonic = true;
1844 EVT maskVT; // which of the c?d instructions to use
// Select the SHUFFLE_MASK vector type matching the element width (byte,
// halfword, word, or doubleword — one per c?d instruction form).
1846 if (EltVT == MVT::i8) {
1848 maskVT = MVT::v16i8;
1849 } else if (EltVT == MVT::i16) {
1851 maskVT = MVT::v8i16;
1852 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1854 maskVT = MVT::v4i32;
1855 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1857 maskVT = MVT::v2i64;
1859 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
// Single pass over the shuffle mask: classify it as (a) monotonic with a
// single element sourced from V2, (b) a rotation of V1, or (c) a general
// shuffle that needs a full byte-permute mask.
1861 for (unsigned i = 0; i != MaxElts; ++i) {
1862 if (SVN->getMaskElt(i) < 0)
1865 unsigned SrcElt = SVN->getMaskElt(i);
// Mask indices >= V2EltIdx0 select elements out of V2.
1868 if (SrcElt >= V2EltIdx0) {
1869 // TODO: optimize for the monotonic case when several consecutive
1870 // elements are taken form V2. Do we ever get such a case?
1871 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1872 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1876 } else if (CurrElt != SrcElt) {
// Check for a rotation pattern: indices increase by one, wrapping once
// from MaxElts-1 back to 0.
1884 if (PrevElt > 0 && SrcElt < MaxElts) {
1885 if ((PrevElt == SrcElt - 1)
1886 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1891 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1892 // First time or after a "wrap around"
1896 // This isn't a rotation, takes elements from vector 2
1902 if (EltsFromV2 == 1 && monotonic) {
1903 // Compute mask and shuffle
1904 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1906 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1907 // R1 ($sp) is used here only as it is guaranteed to have last bits zero
1908 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1909 DAG.getRegister(SPU::R1, PtrVT),
1910 DAG.getConstant(V2EltOffset, MVT::i32));
1911 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1914 // Use shuffle mask in SHUFB synthetic instruction:
// Note the operand order — V2 before V1: the c?d-generated mask selects the
// single inserted element from the first SHUFB operand.
1915 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1917 } else if (rotate) {
// Convert the element-granular rotate amount into a byte count for
// ROTBYTES_LEFT.
1920 rotamt *= EltVT.getSizeInBits()/8;
1921 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1922 V1, DAG.getConstant(rotamt, MVT::i16));
1924 // Convert the SHUFFLE_VECTOR mask's input element units to the
// General case: expand each element index into its constituent byte indices
// and emit a full 16-byte SHUFB permute mask.
1926 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1928 SmallVector<SDValue, 16> ResultMask;
1929 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
// Undef mask slots (< 0) are arbitrarily mapped to element 0.
1930 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1932 for (unsigned j = 0; j < BytesPerElement; ++j)
1933 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1935 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1936 &ResultMask[0], ResultMask.size());
1937 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
//! Lower ISD::SCALAR_TO_VECTOR: splat a constant into a BUILD_VECTOR, or
//! move a non-constant scalar into the preferred slot via PREFSLOT2VEC.
1941 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1942 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1943 DebugLoc dl = Op.getDebugLoc();
1945 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1946 // For a constant, build the appropriate constant vector, which will
1947 // eventually simplify to a vector register load.
1949 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1950 SmallVector<SDValue, 16> ConstVecValues;
1954 // Create a constant vector:
// n_copies = number of lanes; VT = the lane's scalar type.
1955 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1956 default: llvm_unreachable("Unexpected constant value type in "
1957 "LowerSCALAR_TO_VECTOR");
1958 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1959 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1960 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1961 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1962 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1963 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
// Replicate the constant across every lane and build the splat vector.
1966 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1967 for (size_t j = 0; j < n_copies; ++j)
1968 ConstVecValues.push_back(CValue);
1970 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1971 &ConstVecValues[0], ConstVecValues.size());
1973 // Otherwise, copy the value from one register to another:
1974 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1975 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
// Non-constant scalar: place it in the vector's preferred slot.
1982 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
//! Lower ISD::EXTRACT_VECTOR_ELT. For a constant index, build a SHUFB mask
//! that moves the requested element into the type's preferred slot; for a
//! variable index, byte-shift the element to slot 0 and replicate it.
1989 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1990 EVT VT = Op.getValueType();
1991 SDValue N = Op.getOperand(0);
1992 SDValue Elt = Op.getOperand(1);
1993 DebugLoc dl = Op.getDebugLoc();
1996 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
1997 // Constant argument:
1998 int EltNo = (int) C->getZExtValue();
// Bounds-check the constant lane index against the lane count.
// NOTE(review): the i32/i64 messages below say "slot > 4"/"slot > 2" but the
// checks actually fire for slots >= 4 / >= 2 ("> 3"/"> 1" would be accurate).
2001 if (VT == MVT::i8 && EltNo >= 16)
2002 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
2003 else if (VT == MVT::i16 && EltNo >= 8)
2004 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
2005 else if (VT == MVT::i32 && EltNo >= 4)
2006 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
2007 else if (VT == MVT::i64 && EltNo >= 2)
2008 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
2010 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
2011 // i32 and i64: Element 0 is the preferred slot
2012 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
2015 // Need to generate shuffle mask and extract:
2016 int prefslot_begin = -1, prefslot_end = -1;
2017 int elt_byte = EltNo * VT.getSizeInBits() / 8;
// Byte range [prefslot_begin, prefslot_end] of the preferred slot within the
// 16-byte register, per scalar type (case labels elided in this listing).
2019 switch (VT.getSimpleVT().SimpleTy) {
2021 assert(false && "Invalid value type!");
2023 prefslot_begin = prefslot_end = 3;
2027 prefslot_begin = 2; prefslot_end = 3;
2032 prefslot_begin = 0; prefslot_end = 3;
2037 prefslot_begin = 0; prefslot_end = 7;
2042 assert(prefslot_begin != -1 && prefslot_end != -1 &&
2043 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
2045 unsigned int ShufBytes[16] = {
2046 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// Build a per-byte shuffle mask that copies the element's bytes into the
// preferred-slot byte positions; remaining bytes repeat the pattern.
2048 for (int i = 0; i < 16; ++i) {
2049 // zero fill uppper part of preferred slot, don't care about the
2051 unsigned int mask_val;
2052 if (i <= prefslot_end) {
2054 ((i < prefslot_begin)
2056 : elt_byte + (i - prefslot_begin));
2058 ShufBytes[i] = mask_val;
2060 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
// Pack the 16 byte selectors into four big-endian i32 mask words.
2063 SDValue ShufMask[4];
2064 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
2065 unsigned bidx = i * 4;
2066 unsigned int bits = ((ShufBytes[bidx] << 24) |
2067 (ShufBytes[bidx+1] << 16) |
2068 (ShufBytes[bidx+2] << 8) |
2070 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
2073 SDValue ShufMaskVec =
2074 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2075 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
2077 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2078 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
2079 N, N, ShufMaskVec));
2081 // Variable index: Rotate the requested element into slot 0, then replicate
2082 // slot 0 across the vector
2083 EVT VecVT = N.getValueType();
2084 if (!VecVT.isSimple() || !VecVT.isVector()) {
2085 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
2089 // Make life easier by making sure the index is zero-extended to i32
2090 if (Elt.getValueType() != MVT::i32)
2091 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2093 // Scale the index to a bit/byte shift quantity
// scaleFactor = bytes per element (16 / lane count); its log2 is the
// left-shift that converts a lane index into a byte offset.
2095 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2096 unsigned scaleShift = scaleFactor.logBase2();
2099 if (scaleShift > 0) {
2100 // Scale the shift factor:
2101 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2102 DAG.getConstant(scaleShift, MVT::i32));
// Byte-shift the vector left so the requested element lands at byte 0.
2105 vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
2107 // Replicate the bytes starting at byte 0 across the entire vector (for
2108 // consistency with the notion of a unified register set)
// Each "factor" word encodes the byte indices (0..n-1) to splat per lane
// width; e.g. 0x00010203 selects bytes 0-3 for a 32-bit element.
2111 switch (VT.getSimpleVT().SimpleTy) {
2113 report_fatal_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
2117 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2118 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2119 factor, factor, factor, factor);
2123 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2124 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2125 factor, factor, factor, factor);
2130 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2131 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2132 factor, factor, factor, factor);
2137 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2138 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2139 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2140 loFactor, hiFactor, loFactor, hiFactor);
2145 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2146 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2147 vecShift, vecShift, replicate));
//! Lower ISD::INSERT_VECTOR_ELT by computing a c?d insertion mask from a
//! stack-pointer-relative address and SHUFB-merging the scalar into the vector.
2153 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2154 SDValue VecOp = Op.getOperand(0);
2155 SDValue ValOp = Op.getOperand(1);
2156 SDValue IdxOp = Op.getOperand(2);
2157 DebugLoc dl = Op.getDebugLoc();
2158 EVT VT = Op.getValueType();
2159 EVT eltVT = ValOp.getValueType();
2161 // use 0 when the lane to insert to is 'undef'
2163 if (IdxOp.getOpcode() != ISD::UNDEF) {
2164 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2165 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
// Byte offset of the target lane within the 16-byte register.
2166 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2169 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2170 // Use $sp ($1) because it's always 16-byte aligned and it's available:
// Only the low bits of the address matter to the c?d mask generator; $sp
// guarantees those reflect 'Offset' exactly.
2171 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2172 DAG.getRegister(SPU::R1, PtrVT),
2173 DAG.getConstant(Offset, PtrVT));
2174 // widen the mask when dealing with half vectors
2175 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2176 128/ VT.getVectorElementType().getSizeInBits());
2177 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
// SHUFB merges the scalar (splatted via SCALAR_TO_VECTOR) into VecOp at the
// masked lane.
2180 DAG.getNode(SPUISD::SHUFB, dl, VT,
2181 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2183 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
//! Lower i8 arithmetic/shift ops the SPU lacks natively: widen the operands
//! to i16, perform the op there, and truncate the result back to i8.
2188 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2189 const TargetLowering &TLI)
2191 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2192 DebugLoc dl = Op.getDebugLoc();
2193 EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
2195 assert(Op.getValueType() == MVT::i8);
// (switch-case labels elided in this listing; each case widens differently.)
2198 llvm_unreachable("Unhandled i8 math operator");
2202 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2204 SDValue N1 = Op.getOperand(1);
2205 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2206 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2207 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2208 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2213 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2215 SDValue N1 = Op.getOperand(1);
2216 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2217 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2218 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2219 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Rotate case: zero-extend the value and normalize the rotate amount to
// the target's shift-amount type.
2223 SDValue N1 = Op.getOperand(1);
2224 EVT N1VT = N1.getValueType();
2226 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2227 if (!N1VT.bitsEq(ShiftVT)) {
2228 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2231 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2234 // Replicate lower 8-bits into upper 8:
// Duplicating the byte makes a 16-bit rotate behave like an 8-bit rotate.
2236 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2237 DAG.getNode(ISD::SHL, dl, MVT::i16,
2238 N0, DAG.getConstant(8, MVT::i32)));
2240 // Truncate back down to i8
2241 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2242 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
// Logical-shift case: zero-extend operand; adjust shift amount type.
2246 SDValue N1 = Op.getOperand(1);
2247 EVT N1VT = N1.getValueType();
2249 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2250 if (!N1VT.bitsEq(ShiftVT)) {
2251 unsigned N1Opc = ISD::ZERO_EXTEND;
2253 if (N1.getValueType().bitsGT(ShiftVT))
2254 N1Opc = ISD::TRUNCATE;
2256 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2259 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2260 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Arithmetic-shift case: sign-extend so the i16 op preserves the sign bit.
2263 SDValue N1 = Op.getOperand(1);
2264 EVT N1VT = N1.getValueType();
2266 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2267 if (!N1VT.bitsEq(ShiftVT)) {
2268 unsigned N1Opc = ISD::SIGN_EXTEND;
2270 if (N1VT.bitsGT(ShiftVT))
2271 N1Opc = ISD::TRUNCATE;
2272 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2275 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2276 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Multiply case: sign-extend both operands; low 8 bits of the i16 product
// are the correct i8 result regardless of signedness.
2279 SDValue N1 = Op.getOperand(1);
2281 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2282 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2283 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2284 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2292 //! Lower byte immediate operations for v16i8 vectors:
// If one operand of an AND/OR/XOR is a splatted constant vector, rebuild it
// as a 16-way splat of the low byte so it can select the *BI immediate forms.
2294 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2297 EVT VT = Op.getValueType();
2298 DebugLoc dl = Op.getDebugLoc();
// Assume the constant is operand 0; if not a BUILD_VECTOR (possibly behind
// a BITCAST), swap and try operand 1.
2300 ConstVec = Op.getOperand(0);
2301 Arg = Op.getOperand(1);
2302 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2303 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2304 ConstVec = ConstVec.getOperand(0);
2306 ConstVec = Op.getOperand(1);
2307 Arg = Op.getOperand(0);
2308 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2309 ConstVec = ConstVec.getOperand(0);
2314 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2315 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2316 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2318 APInt APSplatBits, APSplatUndef;
2319 unsigned SplatBitSize;
2321 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
// Only rewrite genuine splats whose splat unit is at least one element wide.
2323 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2324 HasAnyUndefs, minSplatBits)
2325 && minSplatBits <= SplatBitSize) {
2326 uint64_t SplatBits = APSplatBits.getZExtValue();
2327 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2329 SmallVector<SDValue, 16> tcVec;
2330 tcVec.assign(16, tc);
// Re-emit the same logical op with a canonical 16 x i8 splat operand.
2331 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2332 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2336 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2337 // lowered. Return the operation, rather than a null SDValue.
2341 //! Custom lowering for CTPOP (count population)
2343 Custom lowering code that counts the number ones in the input
2344 operand. SPU has such an instruction, but it counts the number of
2345 ones per byte, which then have to be accumulated.
2347 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2348 EVT VT = Op.getValueType();
// vecVT: the 128-bit vector type whose lane type is VT (e.g. i16 -> v8i16).
2349 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2350 VT, (128 / VT.getSizeInBits()));
2351 DebugLoc dl = Op.getDebugLoc();
// (case labels elided in this listing; the bodies below are the i8, i16 and
// i32 cases respectively.)
2353 switch (VT.getSimpleVT().SimpleTy) {
2355 assert(false && "Invalid value type!");
// i8: CNTB already yields a per-byte count, so no accumulation is needed.
2357 SDValue N = Op.getOperand(0);
2358 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2360 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2361 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2363 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
// i16: add the two per-byte counts (high byte shifted down), mask to 0x0f.
2367 MachineFunction &MF = DAG.getMachineFunction();
2368 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2370 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2372 SDValue N = Op.getOperand(0);
2373 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2374 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2375 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2377 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2378 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2380 // CNTB_result becomes the chain to which all of the virtual registers
2381 // CNTB_reg, SUM1_reg become associated:
2382 SDValue CNTB_result =
2383 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2385 SDValue CNTB_rescopy =
2386 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2388 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2390 return DAG.getNode(ISD::AND, dl, MVT::i16,
2391 DAG.getNode(ISD::ADD, dl, MVT::i16,
2392 DAG.getNode(ISD::SRL, dl, MVT::i16,
// i32: two shift-and-add reduction rounds fold four per-byte counts into
// the low byte, then mask to 0xff.
2399 MachineFunction &MF = DAG.getMachineFunction();
2400 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2402 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2403 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2405 SDValue N = Op.getOperand(0);
2406 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2407 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2408 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2409 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2411 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2412 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2414 // CNTB_result becomes the chain to which all of the virtual registers
2415 // CNTB_reg, SUM1_reg become associated:
2416 SDValue CNTB_result =
2417 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2419 SDValue CNTB_rescopy =
2420 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2423 DAG.getNode(ISD::SRL, dl, MVT::i32,
2424 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2428 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2429 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32))&#59;
2431 SDValue Sum1_rescopy =
2432 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2435 DAG.getNode(ISD::SRL, dl, MVT::i32,
2436 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2439 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2440 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2442 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2452 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2454 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2455 All conversions to i64 are expanded to a libcall.
2457 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2458 const SPUTargetLowering &TLI) {
2459 EVT OpVT = Op.getValueType();
2460 SDValue Op0 = Op.getOperand(0);
2461 EVT Op0VT = Op0.getValueType();
// Only f64->i32 and anything->i64 need runtime support; other combinations
// fall through unchanged.
2463 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2464 || OpVT == MVT::i64) {
2465 // Convert f32 / f64 to i32 / i64 via libcall.
// Pick the runtime routine for the signed vs. unsigned conversion.
2467 (Op.getOpcode() == ISD::FP_TO_SINT)
2468 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2469 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2470 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2472 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2478 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2480 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2481 All conversions from i64 are expanded to a libcall.
2483 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2484 const SPUTargetLowering &TLI) {
2485 EVT OpVT = Op.getValueType();
2486 SDValue Op0 = Op.getOperand(0);
2487 EVT Op0VT = Op0.getValueType();
// Mirror of LowerFP_TO_INT: i32->f64 and i64->anything go through the
// runtime library; other combinations fall through unchanged.
2489 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2490 || Op0VT == MVT::i64) {
2491 // Convert i32, i64 to f64 via libcall:
2493 (Op.getOpcode() == ISD::SINT_TO_FP)
2494 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2495 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2496 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd int-to-fp conversion!");
2498 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2504 //! Lower ISD::SETCC
2506 This handles MVT::f64 (double floating point) condition lowering
2508 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2509 const TargetLowering &TLI) {
2510 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2511 DebugLoc dl = Op.getDebugLoc();
2512 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2514 SDValue lhs = Op.getOperand(0);
2515 SDValue rhs = Op.getOperand(1);
2516 EVT lhsVT = lhs.getValueType();
2517 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
2519 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2520 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2521 EVT IntVT(MVT::i64);
2523 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2524 // selected to a NOP:
// Reinterpret the double's bits as i64 and slice out the high/low words.
2525 SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
2527 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2528 DAG.getNode(ISD::SRL, dl, IntVT,
2529 i64lhs, DAG.getConstant(32, MVT::i32)));
// High word with the sign bit stripped — used for NaN classification.
2530 SDValue lhsHi32abs =
2531 DAG.getNode(ISD::AND, dl, MVT::i32,
2532 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2534 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2536 // SETO and SETUO only use the lhs operand:
2537 if (CC->get() == ISD::SETO) {
2538 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2540 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
// SETO == NOT(SETUO): implemented as XOR with an all-ones mask.
2541 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2542 DAG.getSetCC(dl, ccResultVT,
2543 lhs, DAG.getConstantFP(0.0, lhsVT),
2545 DAG.getConstant(ccResultAllOnes, ccResultVT));
2546 } else if (CC->get() == ISD::SETUO) {
2547 // Evaluates to true if Op0 is [SQ]NaN
// NaN iff abs(high word) >= exponent-all-ones (0x7ff00000) with a nonzero
// mantissa; the two integer compares below encode that test.
2548 return DAG.getNode(ISD::AND, dl, ccResultVT,
2549 DAG.getSetCC(dl, ccResultVT,
2551 DAG.getConstant(0x7ff00000, MVT::i32),
2553 DAG.getSetCC(dl, ccResultVT,
2555 DAG.getConstant(0, MVT::i32),
// Decompose rhs the same way as lhs.
2559 SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
2561 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2562 DAG.getNode(ISD::SRL, dl, IntVT,
2563 i64rhs, DAG.getConstant(32, MVT::i32)));
2565 // If a value is negative, subtract from the sign magnitude constant:
2566 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2568 // Convert the sign-magnitude representation into 2's complement:
// The SRA by 31 produces an all-ones/all-zeros select mask from the sign bit.
2569 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2570 lhsHi32, DAG.getConstant(31, MVT::i32));
2571 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2573 DAG.getNode(ISD::SELECT, dl, IntVT,
2574 lhsSelectMask, lhsSignMag2TC, i64lhs);
2576 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2577 rhsHi32, DAG.getConstant(31, MVT::i32));
2578 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2580 DAG.getNode(ISD::SELECT, dl, IntVT,
2581 rhsSelectMask, rhsSignMag2TC, i64rhs);
// Map the FP condition onto the equivalent signed-integer condition, valid
// now that both operands are in two's-complement order.
2585 switch (CC->get()) {
2588 compareOp = ISD::SETEQ; break;
2591 compareOp = ISD::SETGT; break;
2594 compareOp = ISD::SETGE; break;
2597 compareOp = ISD::SETLT; break;
2600 compareOp = ISD::SETLE; break;
2603 compareOp = ISD::SETNE; break;
2605 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2609 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2610 (ISD::CondCode) compareOp);
// Ordered predicates additionally require both operands to be non-NaN.
// NOTE(review): relies on the ISD::CondCode encoding where bit 3 marks the
// unordered variants — confirm against ISDOpcodes.h if this is revised.
2612 if ((CC->get() & 0x8) == 0) {
2613 // Ordered comparison:
2614 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2615 lhs, DAG.getConstantFP(0.0, MVT::f64),
2617 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2618 rhs, DAG.getConstantFP(0.0, MVT::f64),
2620 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2622 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2628 //! Lower ISD::SELECT_CC
2630 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2633 \note Need to revisit this in the future: if the code path through the true
2634 and false value computations is longer than the latency of a branch (6
2635 cycles), then it would be more advantageous to branch and insert a new basic
2636 block and branch on the condition. However, this code does not make that
2637 assumption, given the simplisitc uses so far.
2640 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2641 const TargetLowering &TLI) {
2642 EVT VT = Op.getValueType();
2643 SDValue lhs = Op.getOperand(0);
2644 SDValue rhs = Op.getOperand(1);
2645 SDValue trueval = Op.getOperand(2);
2646 SDValue falseval = Op.getOperand(3);
2647 SDValue condition = Op.getOperand(4);
2648 DebugLoc dl = Op.getDebugLoc();
2650 // NOTE: SELB's arguments: $rA, $rB, $mask
2652 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2653 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2654 // condition was true and 0s where the condition was false. Hence, the
2655 // arguments to SELB get reversed.
2657 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2658 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2659 // with another "cannot select select_cc" assert:
// Materialize the comparison as a mask, then branchlessly select with SELB
// (falseval/trueval order is intentional per the note above).
2661 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2662 TLI.getSetCCResultType(Op.getValueType()),
2663 lhs, rhs, condition);
2664 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2667 //! Custom lower ISD::TRUNCATE
// Only i128 -> i64 is handled here, via a SHUFB that moves the least
// significant doubleword into the preferred slot; all other truncates are
// returned unchanged for default handling.
2668 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2670 // Type to truncate to
2671 EVT VT = Op.getValueType();
2672 MVT simpleVT = VT.getSimpleVT();
2673 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2674 VT, (128 / VT.getSizeInBits()));
2675 DebugLoc dl = Op.getDebugLoc();
2677 // Type to truncate from
2678 SDValue Op0 = Op.getOperand(0);
2679 EVT Op0VT = Op0.getValueType();
2681 if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
2682 // Create shuffle mask, least significant doubleword of quadword
// Byte selectors 0x08..0x0f pick bytes 8-15 (the low doubleword on this
// big-endian target); the pattern is repeated to fill the register.
2683 unsigned maskHigh = 0x08090a0b;
2684 unsigned maskLow = 0x0c0d0e0f;
2685 // Use a shuffle to perform the truncation
2686 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2687 DAG.getConstant(maskHigh, MVT::i32),
2688 DAG.getConstant(maskLow, MVT::i32),
2689 DAG.getConstant(maskHigh, MVT::i32),
2690 DAG.getConstant(maskLow, MVT::i32));
2692 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2693 Op0, Op0, shufMask);
2695 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2698 return SDValue(); // Leave the truncate unmolested
2702 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2703 * algorithm is to duplicate the sign bit using rotmai to generate at
2704 * least one byte full of sign bits. Then propagate the "sign-byte" into
2705 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2707 * @param Op The sext operand
2708 * @param DAG The current DAG
2709 * @return The SDValue with the entire instruction sequence
2711 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2713 DebugLoc dl = Op.getDebugLoc();
2715 // Type to extend to
2716 MVT OpVT = Op.getValueType().getSimpleVT();
2718 // Type to extend from
2719 SDValue Op0 = Op.getOperand(0);
2720 MVT Op0VT = Op0.getValueType().getSimpleVT();
2722 // extend i8 & i16 via i32
// Narrow sources are first widened to i32 so only two cases remain below.
2723 if (Op0VT == MVT::i8 || Op0VT == MVT::i16) {
2724 Op0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Op0);
2728 // The type to extend to needs to be a i128 and
2729 // the type to extend from needs to be i64 or i32.
2730 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2731 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2734 // Create shuffle mask
// 0x10101010 selects the sign-filled bytes from the second SHUFB operand;
// the i64 vs i32 source shifts where the payload bytes start.
2735 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2736 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2737 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2738 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2739 DAG.getConstant(mask1, MVT::i32),
2740 DAG.getConstant(mask1, MVT::i32),
2741 DAG.getConstant(mask2, MVT::i32),
2742 DAG.getConstant(mask3, MVT::i32));
2744 // Word wise arithmetic right shift to generate at least one byte
2745 // that contains sign bits.
2746 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2747 SDValue sraVal = DAG.getNode(ISD::SRA,
2750 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2751 DAG.getConstant(31, MVT::i32));
2753 // reinterpret as a i128 (SHUFB requires it). This gets lowered away.
2754 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2756 DAG.getTargetConstant(
2757 SPU::GPRCRegClass.getID(),
2759 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2760 // and the input value into the lower 64 bits.
2761 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2762 extended, sraVal, shufMask);
2763 return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
2766 //! Custom (target-specific) lowering entry point
2768 This is where LLVM's DAG selection process calls to do target-specific
2772 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2774 unsigned Opc = (unsigned) Op.getOpcode();
2775 EVT VT = Op.getValueType();
// Default case: an opcode reached custom lowering without a handler — dump
// diagnostics and abort, since silently returning would miscompile.
2780 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2781 errs() << "Op.getOpcode() = " << Opc << "\n";
2782 errs() << "*Op.getNode():\n";
2783 Op.getNode()->dump();
2785 llvm_unreachable(0);
// Memory and address-materialization nodes (need subtarget info):
2791 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2793 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2794 case ISD::ConstantPool:
2795 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2796 case ISD::GlobalAddress:
2797 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2798 case ISD::JumpTable:
2799 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2800 case ISD::ConstantFP:
2801 return LowerConstantFP(Op, DAG);
2803 // i8, i64 math ops:
2812 return LowerI8Math(Op, DAG, Opc, *this);
// FP <-> integer conversions that may need libcalls:
2816 case ISD::FP_TO_SINT:
2817 case ISD::FP_TO_UINT:
2818 return LowerFP_TO_INT(Op, DAG, *this);
2820 case ISD::SINT_TO_FP:
2821 case ISD::UINT_TO_FP:
2822 return LowerINT_TO_FP(Op, DAG, *this);
2824 // Vector-related lowering.
2825 case ISD::BUILD_VECTOR:
2826 return LowerBUILD_VECTOR(Op, DAG);
2827 case ISD::SCALAR_TO_VECTOR:
2828 return LowerSCALAR_TO_VECTOR(Op, DAG);
2829 case ISD::VECTOR_SHUFFLE:
2830 return LowerVECTOR_SHUFFLE(Op, DAG);
2831 case ISD::EXTRACT_VECTOR_ELT:
2832 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2833 case ISD::INSERT_VECTOR_ELT:
2834 return LowerINSERT_VECTOR_ELT(Op, DAG);
2836 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2840 return LowerByteImmed(Op, DAG);
2842 // Vector and i8 multiply:
2845 return LowerI8Math(Op, DAG, Opc, *this);
2848 return LowerCTPOP(Op, DAG);
2850 case ISD::SELECT_CC:
2851 return LowerSELECT_CC(Op, DAG, *this);
2854 return LowerSETCC(Op, DAG, *this);
2857 return LowerTRUNCATE(Op, DAG);
2859 case ISD::SIGN_EXTEND:
2860 return LowerSIGN_EXTEND(Op, DAG);
// Custom result-type legalization hook. No node kinds are handled here yet:
// unknown opcodes are reported for debugging and the results are left
// unchanged so default legalization proceeds.
2866 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2867 SmallVectorImpl<SDValue>&Results,
2868 SelectionDAG &DAG) const
2871 unsigned Opc = (unsigned) N->getOpcode();
2872 EVT OpVT = N->getValueType(0);
2876 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2877 errs() << "Op.getOpcode() = " << Opc << "\n";
2878 errs() << "*Op.getNode():\n";
2886 /* Otherwise, return unchanged */
2889 //===----------------------------------------------------------------------===//
2890 // Target Optimization Hooks
2891 //===----------------------------------------------------------------------===//
//! Target-specific DAG-combine hook for SPU nodes.
/*!
 Pattern-matches SPU-specific node combinations (indirect address
 computations, preferred-slot insert/extract round trips, degenerate
 shifts) and rewrites them into simpler equivalents.  A non-empty
 \c Result at the end signals a replacement; an empty SDValue leaves the
 node unchanged.
 */
SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
  TargetMachine &TM = getTargetMachine();
  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(0); // everything has at least one operand
  EVT NodeVT = N->getValueType(0); // The node's value type
  EVT Op0VT = Op0.getValueType(); // The first operand's result
  SDValue Result; // Initially, empty result
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
    SDValue Op1 = N->getOperand(1);
    // Fold an ADD into an existing SPUindirect address computation when one
    // of its operands is already a SPUISD::IndirectAddr node.
    if (Op0.getOpcode() == SPUISD::IndirectAddr
        || Op1.getOpcode() == SPUISD::IndirectAddr) {
      // Normalize the operands to reduce repeated code
      SDValue IndirectArg = Op0, AddArg = Op1;
      // Swap so IndirectArg is always the SPUindirect operand.
      if (Op1.getOpcode() == SPUISD::IndirectAddr) {
      if (isa<ConstantSDNode>(AddArg)) {
        ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
        SDValue IndOp1 = IndirectArg.getOperand(1);
        if (CN0->isNullValue()) {
          // (add (SPUindirect <arg>, <arg>), 0) ->
          // (SPUindirect <arg>, <arg>)
#if !defined(NDEBUG)
          if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
              << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
              << "With: (SPUindirect <arg>, <arg>)\n";
        } else if (isa<ConstantSDNode>(IndOp1)) {
          // (add (SPUindirect <arg>, <const>), <const>) ->
          // (SPUindirect <arg>, <const + const>)
          // Merge both constant displacements into a single offset.
          ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
          int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
          SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
#if !defined(NDEBUG)
          if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
              << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
              << "), " << CN0->getSExtValue() << ")\n"
              << "With: (SPUindirect <arg>, "
              << combinedConst << ")\n";
          return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
                             IndirectArg, combinedValue);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: {
    // An extension of a preferred-slot extract with identical source and
    // result types is a no-op and can be dropped.
    if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
      // (any_extend (SPUextract_elt0 <arg>)) ->
      // (SPUextract_elt0 <arg>)
      // Types must match, however...
#if !defined(NDEBUG)
      if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
        errs() << "\nReplace: ";
        errs() << "\nWith: ";
        Op0.getNode()->dump(&DAG);
  case SPUISD::IndirectAddr: {
    // A zero-offset indirect over an A-form address is redundant when not in
    // large-memory mode.
    if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
      if (CN != 0 && CN->isNullValue()) {
        // (SPUindirect (SPUaform <addr>, 0), 0) ->
        // (SPUaform <addr>, 0)
        DEBUG(errs() << "Replace: ");
        DEBUG(N->dump(&DAG));
        DEBUG(errs() << "\nWith: ");
        DEBUG(Op0.getNode()->dump(&DAG));
        DEBUG(errs() << "\n");
    } else if (Op0.getOpcode() == ISD::ADD) {
      SDValue Op1 = N->getOperand(1);
      if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
        // (SPUindirect (add <arg>, <arg>), 0) ->
        // (SPUindirect <arg>, <arg>)
        if (CN1->isNullValue()) {
#if !defined(NDEBUG)
          if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
              << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
              << "With: (SPUindirect <arg>, <arg>)\n";
          return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
                             Op0.getOperand(0), Op0.getOperand(1));
  case SPUISD::SHL_BITS:
  case SPUISD::SHL_BYTES:
  case SPUISD::ROTBYTES_LEFT: {
    SDValue Op1 = N->getOperand(1);
    // Kill degenerate vector shifts:
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
      if (CN->isNullValue()) {
  case SPUISD::PREFSLOT2VEC: {
    switch (Op0.getOpcode()) {
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::SIGN_EXTEND: {
      // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
      // but only if the SPUprefslot2vec and <arg> types match.
      SDValue Op00 = Op0.getOperand(0);
      if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
        SDValue Op000 = Op00.getOperand(0);
        if (Op000.getValueType() == NodeVT) {
    case SPUISD::VEC2PREFSLOT: {
      // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
      // Round-tripping through the preferred slot yields the original vector.
      Result = Op0.getOperand(0);
  // Otherwise, return unchanged.
  if (Result.getNode()) {
    // A replacement was produced above; trace it when debugging is enabled.
    DEBUG(errs() << "\nReplace.SPU: ");
    DEBUG(N->dump(&DAG));
    DEBUG(errs() << "\nWith: ");
    DEBUG(Result.getNode()->dump(&DAG));
    DEBUG(errs() << "\n");
3078 //===----------------------------------------------------------------------===//
3079 // Inline Assembly Support
3080 //===----------------------------------------------------------------------===//
3082 /// getConstraintType - Given a constraint letter, return the type of
3083 /// constraint it is for this target.
SPUTargetLowering::ConstraintType
SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
  // Only single-letter constraints are classified here.
  if (ConstraintLetter.size() == 1) {
    switch (ConstraintLetter[0]) {
      // The letters recognized by this switch all name register classes.
      return C_RegisterClass;
  // Anything else is delegated to the target-independent classifier.
  return TargetLowering::getConstraintType(ConstraintLetter);
3100 /// Examine constraint type and operand type and determine a weight value.
3101 /// This object must already have been set up with the operand type
3102 /// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
SPUTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;  // Assume no match until proven otherwise.
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
  // Look at the constraint type.
  switch (*constraint) {
    // Unrecognized letters fall back to the target-independent weighting.
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
  //FIXME: Seems like the supported constraint letters were just copied
  // from PPC, as the following doesn't correspond to the GCC docs.
  // I'm leaving it so until someone adds the corresponding lowering support.
    // Register-class constraint letters receive a plain register weight.
    weight = CW_Register;
//! Map a single-letter inline-asm constraint to an SPU register class.
std::pair<unsigned, const TargetRegisterClass*>
SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
      return std::make_pair(0U, SPU::R64CRegisterClass);
      return std::make_pair(0U, SPU::R32CRegisterClass);
      // Floating-point constraints select the class by the operand's VT.
      return std::make_pair(0U, SPU::R32FPRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, SPU::R64FPRegisterClass);
      return std::make_pair(0U, SPU::GPRCRegisterClass);
  // Unhandled constraints go to the default implementation.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3158 //! Compute used/known bits for a SPU operand
SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                  const SelectionDAG &DAG,
                                                  unsigned Depth ) const {
  // Bit width of a uint64_t (used for bit-count bookkeeping).
  const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
  switch (Op.getOpcode()) {
  // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  // NOTE(review): the handling shared by the SPU opcodes listed below is not
  // visible here — confirm what known-bit info (if any) they report.
  case SPUISD::PREFSLOT2VEC:
  case SPUISD::LDRESULT:
  case SPUISD::VEC2PREFSLOT:
  case SPUISD::SHLQUAD_L_BITS:
  case SPUISD::SHLQUAD_L_BYTES:
  case SPUISD::VEC_ROTL:
  case SPUISD::VEC_ROTR:
  case SPUISD::ROTBYTES_LEFT:
  case SPUISD::SELECT_MASK:
//! Report the number of known sign bits for SPU-specific nodes.
SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth) const {
  switch (Op.getOpcode()) {
    EVT VT = Op.getValueType();
    // Only the small scalar integer types are meaningful for this case.
    if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
    // Report the value as fully sign-extended: every bit is a copy of the
    // sign bit.
    return VT.getSizeInBits();
3209 // LowerAsmOperandForConstraint
//! Lower an inline-asm operand for a constraint letter (delegates to base).
SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                std::string &Constraint,
                                                std::vector<SDValue> &Ops,
                                                SelectionDAG &DAG) const {
  // Default, for the time being, to the base class handler
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3219 /// isLegalAddressImmediate - Return true if the integer value can be used
3220 /// as the offset of the target addressing mode.
bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
  // SPU's addresses are 256K:
  // NOTE(review): both bounds are exclusive — V == -(1 << 18) and
  // V == (1 << 18) - 1 are rejected; confirm this matches the intended
  // 18-bit immediate encoding rather than being an off-by-one.
  return (V > -(1 << 18) && V < (1 << 18) - 1);
// Overload for GlobalValue-based addresses.
bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
//! Can a constant offset be folded into a global-address node?
SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The SPU target isn't yet aware of offsets.
// Can we compare against Imm directly, without first loading it into a register?
bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  //ceqi, cgti, etc. all take s10 operand
  // i.e. a signed 10-bit immediate in the range [-512, 511].
  return isInt<10>(Imm);
3244 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3247 // A-form: 18bit absolute address.
3248 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3251 // D-form: reg + 14bit offset
3252 if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3256 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)