1 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file implements the SPUTargetLowering class.
11 //===----------------------------------------------------------------------===//
13 #include "SPURegisterNames.h"
14 #include "SPUISelLowering.h"
15 #include "SPUTargetMachine.h"
16 #include "SPUFrameLowering.h"
17 #include "SPUMachineFunction.h"
18 #include "llvm/Constants.h"
19 #include "llvm/Function.h"
20 #include "llvm/Intrinsics.h"
21 #include "llvm/CallingConv.h"
22 #include "llvm/Type.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/SelectionDAG.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/ADT/VectorExtras.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
40 // Used in getTargetNodeName() below
42 std::map<unsigned, const char *> node_names;
44 // Byte offset of the preferred slot (counted from the MSB)
int prefslotOffset(EVT VT) {
  int retval = 0;
  if (VT==MVT::i1) retval=3;
  if (VT==MVT::i8) retval=3;
  if (VT==MVT::i16) retval=2;

  return retval;
}
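// Illustrative note: under the CBEA preferred-slot convention, an i8 or i1
// scalar occupies byte 3 of its 16-byte quadword, an i16 occupies bytes 2-3
// (hence offset 2), and i32 and wider types start at byte 0, which is why
// wider types fall through to the default of 0 above.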
54 //! Expand a library call into an actual call DAG node
57 This code is taken from SelectionDAGLegalize, since it is not exposed as
58 part of the LLVM SelectionDAG API.
62 ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
63 bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
// The input chain to this libcall is the entry node of the function.
// Legalizing the call will automatically add the previous call to the
// dependence.
67 SDValue InChain = DAG.getEntryNode();
69 TargetLowering::ArgListTy Args;
70 TargetLowering::ArgListEntry Entry;
71 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
72 EVT ArgVT = Op.getOperand(i).getValueType();
73 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy());
83 // Splice the libcall in wherever FindInputOutputChains tells us to.
  const Type *RetTy =
    Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
86 std::pair<SDValue, SDValue> CallInfo =
87 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
88 0, TLI.getLibcallCallingConv(LC), false,
89 /*isReturnValueUsed=*/true,
90 Callee, Args, DAG, Op.getDebugLoc());
92 return CallInfo.first;
96 SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
97 : TargetLowering(TM, new TargetLoweringObjectFileELF()),
100 // Use _setjmp/_longjmp instead of setjmp/longjmp.
101 setUseUnderscoreSetJmp(true);
102 setUseUnderscoreLongJmp(true);
104 // Set RTLIB libcall names as used by SPU:
105 setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
107 // Set up the SPU's register classes:
108 addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
109 addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
110 addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
111 addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
112 addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
113 addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
114 addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
116 // SPU has no sign or zero extended loads for i1, i8, i16:
117 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
118 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
119 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
121 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
122 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
124 setTruncStoreAction(MVT::i128, MVT::i64, Expand);
125 setTruncStoreAction(MVT::i128, MVT::i32, Expand);
126 setTruncStoreAction(MVT::i128, MVT::i16, Expand);
127 setTruncStoreAction(MVT::i128, MVT::i8, Expand);
129 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
131 // SPU constant load actions are custom lowered:
132 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
133 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
135 // SPU's loads and stores have to be custom lowered:
136 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
138 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
140 setOperationAction(ISD::LOAD, VT, Custom);
141 setOperationAction(ISD::STORE, VT, Custom);
142 setLoadExtAction(ISD::EXTLOAD, VT, Custom);
143 setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
144 setLoadExtAction(ISD::SEXTLOAD, VT, Custom);
146 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
147 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
148 setTruncStoreAction(VT, StoreVT, Expand);
152 for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
154 MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;
156 setOperationAction(ISD::LOAD, VT, Custom);
157 setOperationAction(ISD::STORE, VT, Custom);
159 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
160 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
161 setTruncStoreAction(VT, StoreVT, Expand);
165 // Expand the jumptable branches
166 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
167 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
169 // Custom lower SELECT_CC for most cases, but expand by default
170 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
171 setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
172 setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
173 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
174 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
176 // SPU has no intrinsics for these particular operations:
177 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
179 // SPU has no division/remainder instructions
180 setOperationAction(ISD::SREM, MVT::i8, Expand);
181 setOperationAction(ISD::UREM, MVT::i8, Expand);
182 setOperationAction(ISD::SDIV, MVT::i8, Expand);
183 setOperationAction(ISD::UDIV, MVT::i8, Expand);
184 setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
185 setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
186 setOperationAction(ISD::SREM, MVT::i16, Expand);
187 setOperationAction(ISD::UREM, MVT::i16, Expand);
188 setOperationAction(ISD::SDIV, MVT::i16, Expand);
189 setOperationAction(ISD::UDIV, MVT::i16, Expand);
190 setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
191 setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
192 setOperationAction(ISD::SREM, MVT::i32, Expand);
193 setOperationAction(ISD::UREM, MVT::i32, Expand);
194 setOperationAction(ISD::SDIV, MVT::i32, Expand);
195 setOperationAction(ISD::UDIV, MVT::i32, Expand);
196 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
197 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
198 setOperationAction(ISD::SREM, MVT::i64, Expand);
199 setOperationAction(ISD::UREM, MVT::i64, Expand);
200 setOperationAction(ISD::SDIV, MVT::i64, Expand);
201 setOperationAction(ISD::UDIV, MVT::i64, Expand);
202 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
203 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
204 setOperationAction(ISD::SREM, MVT::i128, Expand);
205 setOperationAction(ISD::UREM, MVT::i128, Expand);
206 setOperationAction(ISD::SDIV, MVT::i128, Expand);
207 setOperationAction(ISD::UDIV, MVT::i128, Expand);
208 setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
209 setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
211 // We don't support sin/cos/sqrt/fmod
212 setOperationAction(ISD::FSIN , MVT::f64, Expand);
213 setOperationAction(ISD::FCOS , MVT::f64, Expand);
214 setOperationAction(ISD::FREM , MVT::f64, Expand);
215 setOperationAction(ISD::FSIN , MVT::f32, Expand);
216 setOperationAction(ISD::FCOS , MVT::f32, Expand);
217 setOperationAction(ISD::FREM , MVT::f32, Expand);
// Expand fsqrt to the appropriate libcall (NOTE: should use the hardware
// fsqrt where it is available)
221 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
222 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
224 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
225 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
227 // SPU can do rotate right and left, so legalize it... but customize for i8
228 // because instructions don't exist.
// FIXME: Change from "expand" to the appropriate type once ROTR is supported
// in the .td files.
232 setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
233 setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
234 setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);
236 setOperationAction(ISD::ROTL, MVT::i32, Legal);
237 setOperationAction(ISD::ROTL, MVT::i16, Legal);
238 setOperationAction(ISD::ROTL, MVT::i8, Custom);
240 // SPU has no native version of shift left/right for i8
241 setOperationAction(ISD::SHL, MVT::i8, Custom);
242 setOperationAction(ISD::SRL, MVT::i8, Custom);
243 setOperationAction(ISD::SRA, MVT::i8, Custom);
245 // Make these operations legal and handle them during instruction selection:
246 setOperationAction(ISD::SHL, MVT::i64, Legal);
247 setOperationAction(ISD::SRL, MVT::i64, Legal);
248 setOperationAction(ISD::SRA, MVT::i64, Legal);
250 // Custom lower i8, i32 and i64 multiplications
251 setOperationAction(ISD::MUL, MVT::i8, Custom);
252 setOperationAction(ISD::MUL, MVT::i32, Legal);
253 setOperationAction(ISD::MUL, MVT::i64, Legal);
255 // Expand double-width multiplication
256 // FIXME: It would probably be reasonable to support some of these operations
257 setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
258 setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
259 setOperationAction(ISD::MULHU, MVT::i8, Expand);
260 setOperationAction(ISD::MULHS, MVT::i8, Expand);
261 setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
262 setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
263 setOperationAction(ISD::MULHU, MVT::i16, Expand);
264 setOperationAction(ISD::MULHS, MVT::i16, Expand);
265 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
266 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
267 setOperationAction(ISD::MULHU, MVT::i32, Expand);
268 setOperationAction(ISD::MULHS, MVT::i32, Expand);
269 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
270 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
271 setOperationAction(ISD::MULHU, MVT::i64, Expand);
272 setOperationAction(ISD::MULHS, MVT::i64, Expand);
274 // Need to custom handle (some) common i8, i64 math ops
275 setOperationAction(ISD::ADD, MVT::i8, Custom);
276 setOperationAction(ISD::ADD, MVT::i64, Legal);
277 setOperationAction(ISD::SUB, MVT::i8, Custom);
278 setOperationAction(ISD::SUB, MVT::i64, Legal);
// SPU does not have BSWAP. It does have CTLZ support for i32.
281 // CTPOP has to be custom lowered.
282 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
283 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
285 setOperationAction(ISD::CTPOP, MVT::i8, Custom);
286 setOperationAction(ISD::CTPOP, MVT::i16, Custom);
287 setOperationAction(ISD::CTPOP, MVT::i32, Custom);
288 setOperationAction(ISD::CTPOP, MVT::i64, Custom);
289 setOperationAction(ISD::CTPOP, MVT::i128, Expand);
291 setOperationAction(ISD::CTTZ , MVT::i8, Expand);
292 setOperationAction(ISD::CTTZ , MVT::i16, Expand);
293 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
294 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
295 setOperationAction(ISD::CTTZ , MVT::i128, Expand);
297 setOperationAction(ISD::CTLZ , MVT::i8, Promote);
298 setOperationAction(ISD::CTLZ , MVT::i16, Promote);
299 setOperationAction(ISD::CTLZ , MVT::i32, Legal);
300 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
301 setOperationAction(ISD::CTLZ , MVT::i128, Expand);
303 // SPU has a version of select that implements (a&~c)|(b&c), just like
304 // select ought to work:
305 setOperationAction(ISD::SELECT, MVT::i8, Legal);
306 setOperationAction(ISD::SELECT, MVT::i16, Legal);
307 setOperationAction(ISD::SELECT, MVT::i32, Legal);
308 setOperationAction(ISD::SELECT, MVT::i64, Legal);
310 setOperationAction(ISD::SETCC, MVT::i8, Legal);
311 setOperationAction(ISD::SETCC, MVT::i16, Legal);
312 setOperationAction(ISD::SETCC, MVT::i32, Legal);
313 setOperationAction(ISD::SETCC, MVT::i64, Legal);
314 setOperationAction(ISD::SETCC, MVT::f64, Custom);
316 // Custom lower i128 -> i64 truncates
317 setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);
319 // Custom lower i32/i64 -> i128 sign extend
320 setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);
322 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
323 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
324 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
325 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
326 // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
327 // to expand to a libcall, hence the custom lowering:
328 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
329 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
330 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
331 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
332 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
333 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
335 // FDIV on SPU requires custom lowering
336 setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall
338 // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
339 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
340 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
341 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
342 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
343 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
344 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
345 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
346 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
348 setOperationAction(ISD::BITCAST, MVT::i32, Legal);
349 setOperationAction(ISD::BITCAST, MVT::f32, Legal);
350 setOperationAction(ISD::BITCAST, MVT::i64, Legal);
351 setOperationAction(ISD::BITCAST, MVT::f64, Legal);
353 // We cannot sextinreg(i1). Expand to shifts.
354 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
356 // We want to legalize GlobalAddress and ConstantPool nodes into the
357 // appropriate instructions to materialize the address.
358 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
360 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
362 setOperationAction(ISD::GlobalAddress, VT, Custom);
363 setOperationAction(ISD::ConstantPool, VT, Custom);
364 setOperationAction(ISD::JumpTable, VT, Custom);
367 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
368 setOperationAction(ISD::VASTART , MVT::Other, Custom);
370 // Use the default implementation.
371 setOperationAction(ISD::VAARG , MVT::Other, Expand);
372 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
373 setOperationAction(ISD::VAEND , MVT::Other, Expand);
374 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
375 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
376 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
377 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);
379 // Cell SPU has instructions for converting between i64 and fp.
380 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
381 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
383 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
384 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
386 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
387 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
389 // First set operation action for all vector types to expand. Then we
390 // will selectively turn on ones that can be effectively codegen'd.
391 addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
392 addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
393 addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
394 addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
395 addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
396 addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
398 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
399 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
400 MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
402 // add/sub are legal for all supported vector VT's.
403 setOperationAction(ISD::ADD, VT, Legal);
404 setOperationAction(ISD::SUB, VT, Legal);
// mul is likewise legal for all supported vector VT's.
406 setOperationAction(ISD::MUL, VT, Legal);
408 setOperationAction(ISD::AND, VT, Legal);
409 setOperationAction(ISD::OR, VT, Legal);
410 setOperationAction(ISD::XOR, VT, Legal);
411 setOperationAction(ISD::LOAD, VT, Custom);
412 setOperationAction(ISD::SELECT, VT, Legal);
413 setOperationAction(ISD::STORE, VT, Custom);
415 // These operations need to be expanded:
416 setOperationAction(ISD::SDIV, VT, Expand);
417 setOperationAction(ISD::SREM, VT, Expand);
418 setOperationAction(ISD::UDIV, VT, Expand);
419 setOperationAction(ISD::UREM, VT, Expand);
421 // Custom lower build_vector, constant pool spills, insert and
422 // extract vector elements:
423 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
424 setOperationAction(ISD::ConstantPool, VT, Custom);
425 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
426 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
427 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
428 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
431 setOperationAction(ISD::AND, MVT::v16i8, Custom);
432 setOperationAction(ISD::OR, MVT::v16i8, Custom);
433 setOperationAction(ISD::XOR, MVT::v16i8, Custom);
434 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
436 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
438 setShiftAmountType(MVT::i32);
439 setBooleanContents(ZeroOrNegativeOneBooleanContent);
441 setStackPointerRegisterToSaveRestore(SPU::R1);
443 // We have target-specific dag combine patterns for the following nodes:
444 setTargetDAGCombine(ISD::ADD);
445 setTargetDAGCombine(ISD::ZERO_EXTEND);
446 setTargetDAGCombine(ISD::SIGN_EXTEND);
447 setTargetDAGCombine(ISD::ANY_EXTEND);
449 computeRegisterProperties();
451 // Set pre-RA register scheduler default to BURR, which produces slightly
452 // better code than the default (could also be TDRR, but TargetLowering.h
453 // needs a mod to support that model):
454 setSchedulingPreference(Sched::RegPressure);
const char *SPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
460 if (node_names.empty()) {
461 node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
462 node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
463 node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
464 node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
465 node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
466 node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
467 node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
468 node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
469 node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
470 node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
471 node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
472 node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
473 node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
474 node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
475 node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
476 node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
477 node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
478 node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
479 node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
480 "SPUISD::ROTBYTES_LEFT_BITS";
481 node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
482 node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
483 node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
484 node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
485 node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
488 std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
490 return ((i != node_names.end()) ? i->second : 0);
493 /// getFunctionAlignment - Return the Log2 alignment of this function.
494 unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
498 //===----------------------------------------------------------------------===//
499 // Return the Cell SPU's SETCC result type
500 //===----------------------------------------------------------------------===//
MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  MVT::SimpleValueType retval;
  switch(VT.getSimpleVT().SimpleTy){
  case MVT::i1:
  case MVT::i8:  retval = MVT::i8;  break;
  case MVT::i16: retval = MVT::i16; break;
  default:       retval = MVT::i32; break;
  }
  return retval;
}
519 //===----------------------------------------------------------------------===//
520 // Calling convention code:
521 //===----------------------------------------------------------------------===//
523 #include "SPUGenCallingConv.inc"
525 //===----------------------------------------------------------------------===//
526 // LowerOperation implementation
527 //===----------------------------------------------------------------------===//
529 /// Custom lower loads for CellSPU
531 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
532 within a 16-byte block, we have to rotate to extract the requested element.
534 For extending loads, we also want to ensure that the following sequence is
535 emitted, e.g. for MVT::f32 extending load to MVT::f64:
 %1 v16i8,ch = load
 %2 v16i8,ch = rotate %1
 %3 v4f32,ch = bitconvert %2
 %4 f32      = vec2prefslot %3
 %5 f64      = fp_extend %4
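 As a worked example (an illustration, not from the original comment): for a
 naturally aligned i32 load whose address has (offset & 0xf) == 4, the code
 below computes rotamt = (offset & 0xf) - prefslotOffset(i32) = 4 - 0 = 4, so
 the quadword is rotated left by four bytes to move the value into the
 preferred slot before VEC2PREFSLOT extracts it.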
546 LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
547 LoadSDNode *LN = cast<LoadSDNode>(Op);
548 SDValue the_chain = LN->getChain();
549 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
550 EVT InVT = LN->getMemoryVT();
551 EVT OutVT = Op.getValueType();
552 ISD::LoadExtType ExtType = LN->getExtensionType();
553 unsigned alignment = LN->getAlignment();
554 int pso = prefslotOffset(InVT);
555 DebugLoc dl = Op.getDebugLoc();
556 EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
557 (128 / InVT.getSizeInBits()));
560 assert( LN->getAddressingMode() == ISD::UNINDEXED
561 && "we should get only UNINDEXED adresses");
562 // clean aligned loads can be selected as-is
563 if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)
566 // Get pointerinfos to the memory chunk(s) that contain the data to load
567 uint64_t mpi_offset = LN->getPointerInfo().Offset;
568 mpi_offset -= mpi_offset%16;
569 MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
570 MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);
SDValue basePtr = LN->getBasePtr();
SDValue rotate;
if ((alignment%16) == 0) {
  ConstantSDNode *CN = 0;
579 // Special cases for a known aligned load to simplify the base pointer
580 // and the rotation amount:
581 if (basePtr.getOpcode() == ISD::ADD
      && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
583 // Known offset into basePtr
584 int64_t offset = CN->getSExtValue();
585 int64_t rotamt = int64_t((offset & 0xf) - pso);
590 rotate = DAG.getConstant(rotamt, MVT::i16);
592 // Simplify the base pointer for this case:
593 basePtr = basePtr.getOperand(0);
594 if ((offset & ~0xf) > 0) {
595 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
597 DAG.getConstant((offset & ~0xf), PtrVT));
599 } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
600 || (basePtr.getOpcode() == SPUISD::IndirectAddr
601 && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
602 && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
603 // Plain aligned a-form address: rotate into preferred slot
604 // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
605 int64_t rotamt = -pso;
608 rotate = DAG.getConstant(rotamt, MVT::i16);
610 // Offset the rotate amount by the basePtr and the preferred slot
612 int64_t rotamt = -pso;
615 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
617 DAG.getConstant(rotamt, PtrVT));
620 // Unaligned load: must be more pessimistic about addressing modes:
621 if (basePtr.getOpcode() == ISD::ADD) {
622 MachineFunction &MF = DAG.getMachineFunction();
623 MachineRegisterInfo &RegInfo = MF.getRegInfo();
624 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
627 SDValue Op0 = basePtr.getOperand(0);
628 SDValue Op1 = basePtr.getOperand(1);
630 if (isa<ConstantSDNode>(Op1)) {
631 // Convert the (add <ptr>, <const>) to an indirect address contained
632 // in a register. Note that this is done because we need to avoid
633 // creating a 0(reg) d-form address due to the SPU's block loads.
634 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
635 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
636 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
638 // Convert the (add <arg1>, <arg2>) to an indirect address, which
639 // will likely be lowered as a reg(reg) x-form address.
640 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
643 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
645 DAG.getConstant(0, PtrVT));
648 // Offset the rotate amount by the basePtr and the preferred slot
650 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
652 DAG.getConstant(-pso, PtrVT));
// Do the load as an i128 to allow possible shifting
656 SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
658 LN->isVolatile(), LN->isNonTemporal(), 16);
// When the size is not greater than the alignment we get all the data with
// just one load.
662 if (alignment >= InVT.getSizeInBits()/8) {
664 the_chain = low.getValue(1);
666 // Rotate into the preferred slot:
667 result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
668 low.getValue(0), rotate);
670 // Convert the loaded v16i8 vector to the appropriate vector type
671 // specified by the operand:
672 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
673 InVT, (128 / InVT.getSizeInBits()));
674 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
675 DAG.getNode(ISD::BITCAST, dl, vecVT, result));
// When the alignment is less than the size, we might need two loads (known
// only at run-time).
// TODO: if the memory address is composed only from constants, we have
// extra knowledge and might avoid the second load.
682 // storage position offset from lower 16 byte aligned memory chunk
683 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
684 basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
686 SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
687 DAG.getConstant( 16, MVT::i32),
// Get a register full of ones. (This implementation is a workaround: LLVM
// cannot handle 128-bit signed int constants.)
691 SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
692 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
694 SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
695 DAG.getNode(ISD::ADD, dl, PtrVT,
697 DAG.getConstant(16, PtrVT)),
699 LN->isVolatile(), LN->isNonTemporal(), 16);
701 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
// Shift the (possible) high part right to compensate for the misalignment.
// If there is no high part (i.e. the value is i64 and the offset is 4), this
// will zero out the high value.
707 high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
708 DAG.getNode(ISD::SUB, dl, MVT::i32,
709 DAG.getConstant( 16, MVT::i32),
// Shift the low part similarly.
714 // TODO: add SPUISD::SHL_BYTES
715 low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
717 // Merge the two parts
718 result = DAG.getNode(ISD::BITCAST, dl, vecVT,
719 DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
721 if (!InVT.isVector()) {
722 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );
726 // Handle extending loads by extending the scalar result:
727 if (ExtType == ISD::SEXTLOAD) {
728 result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
729 } else if (ExtType == ISD::ZEXTLOAD) {
730 result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
731 } else if (ExtType == ISD::EXTLOAD) {
732 unsigned NewOpc = ISD::ANY_EXTEND;
734 if (OutVT.isFloatingPoint())
735 NewOpc = ISD::FP_EXTEND;
737 result = DAG.getNode(NewOpc, dl, OutVT, result);
740 SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
741 SDValue retops[2] = {
746 result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
747 retops, sizeof(retops) / sizeof(retops[0]));
751 /// Custom lower stores for CellSPU
753 All CellSPU stores are aligned to 16-byte boundaries, so for elements
754 within a 16-byte block, we have to generate a shuffle to insert the
755 requested element into its place, then store the resulting block.
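 As an illustrative sketch (not part of the original comment): storing an i32
 whose address has (offset & 0xf) == 8 first loads the containing 16-byte
 block, builds a SHUFFLE_MASK/SHUFB pair that splices the scalar into bytes
 8-11 of that block, and then writes the whole quadword back out.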
758 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
759 StoreSDNode *SN = cast<StoreSDNode>(Op);
760 SDValue Value = SN->getValue();
761 EVT VT = Value.getValueType();
762 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
763 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
764 DebugLoc dl = Op.getDebugLoc();
765 unsigned alignment = SN->getAlignment();
767 EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
768 (128 / StVT.getSizeInBits()));
769 // Get pointerinfos to the memory chunk(s) that contain the data to load
770 uint64_t mpi_offset = SN->getPointerInfo().Offset;
771 mpi_offset -= mpi_offset%16;
772 MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
773 MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);
777 assert( SN->getAddressingMode() == ISD::UNINDEXED
778 && "we should get only UNINDEXED adresses");
779 // clean aligned loads can be selected as-is
780 if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
783 SDValue alignLoadVec;
784 SDValue basePtr = SN->getBasePtr();
785 SDValue the_chain = SN->getChain();
786 SDValue insertEltOffs;
if ((alignment%16) == 0) {
  ConstantSDNode *CN = 0;
  // Special cases for a known aligned store to simplify the base pointer
  // and insertion byte:
792 if (basePtr.getOpcode() == ISD::ADD
793 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
794 // Known offset into basePtr
795 int64_t offset = CN->getSExtValue();
797 // Simplify the base pointer for this case:
798 basePtr = basePtr.getOperand(0);
799 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
801 DAG.getConstant((offset & 0xf), PtrVT));
803 if ((offset & ~0xf) > 0) {
804 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
806 DAG.getConstant((offset & ~0xf), PtrVT));
809 // Otherwise, assume it's at byte 0 of basePtr
810 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
812 DAG.getConstant(0, PtrVT));
813 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
815 DAG.getConstant(0, PtrVT));
// Unaligned store: must be more pessimistic about addressing modes:
819 if (basePtr.getOpcode() == ISD::ADD) {
820 MachineFunction &MF = DAG.getMachineFunction();
821 MachineRegisterInfo &RegInfo = MF.getRegInfo();
822 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
825 SDValue Op0 = basePtr.getOperand(0);
826 SDValue Op1 = basePtr.getOperand(1);
828 if (isa<ConstantSDNode>(Op1)) {
829 // Convert the (add <ptr>, <const>) to an indirect address contained
830 // in a register. Note that this is done because we need to avoid
831 // creating a 0(reg) d-form address due to the SPU's block loads.
832 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
833 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
834 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
836 // Convert the (add <arg1>, <arg2>) to an indirect address, which
837 // will likely be lowered as a reg(reg) x-form address.
838 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
841 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
843 DAG.getConstant(0, PtrVT));
846 // Insertion point is solely determined by basePtr's contents
847 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
849 DAG.getConstant(0, PtrVT));
852 // Load the lower part of the memory to which to store.
853 SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
854 lowMemPtr, SN->isVolatile(), SN->isNonTemporal(), 16);
856 // if we don't need to store over the 16 byte boundary, one store suffices
857 if (alignment >= StVT.getSizeInBits()/8) {
859 the_chain = low.getValue(1);
861 LoadSDNode *LN = cast<LoadSDNode>(low);
862 SDValue theValue = SN->getValue();
865 && (theValue.getOpcode() == ISD::AssertZext
866 || theValue.getOpcode() == ISD::AssertSext)) {
867 // Drill down and get the value for zero- and sign-extended
869 theValue = theValue.getOperand(0);
872 // If the base pointer is already a D-form address, then just create
// a new D-form address with a slot offset and the original base pointer.
874 // Otherwise generate a D-form address with the slot offset relative
875 // to the stack pointer, which is always aligned.
877 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
878 errs() << "CellSPU LowerSTORE: basePtr = ";
879 basePtr.getNode()->dump(&DAG);
884 SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
886 SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
889 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
891 DAG.getNode(ISD::BITCAST, dl,
892 MVT::v4i32, insertEltOp));
894 result = DAG.getStore(the_chain, dl, result, basePtr,
896 LN->isVolatile(), LN->isNonTemporal(),
900 // do the store when it might cross the 16 byte memory access boundary.
902 // TODO issue a warning if SN->isVolatile()== true? This is likely not
903 // what the user wanted.
// address offset from the nearest lower 16-byte aligned address
906 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
908 DAG.getConstant(0xf, MVT::i32));
910 SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
911 DAG.getConstant( 16, MVT::i32),
913 SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32,
914 DAG.getConstant( VT.getSizeInBits()/8,
917 // 16 - sizeof(Value)
918 SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
919 DAG.getConstant( 16, MVT::i32),
920 DAG.getConstant( VT.getSizeInBits()/8,
// get a register full of ones
923 SDValue ones = DAG.getConstant(-1, MVT::v4i32);
924 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
// Create the 128-bit masks that have ones where the data to store is
// located.
SDValue lowmask, himask;
// If the value to store doesn't fill an entire 128 bits, zero out the last
// bits of the mask so that only the bytes of the value we want to store are
// affected. This happens e.g. in the case of a store i32, align 2.
934 Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
935 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
936 lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
938 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
939 Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
944 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
// This will be zero if no data goes to the high quad.
947 himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
949 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
952 // Load in the old data and zero out the parts that will be overwritten with
953 // the new data to store.
954 SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
955 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
956 DAG.getConstant( 16, PtrVT)),
958 SN->isVolatile(), SN->isNonTemporal(), 16);
959 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
962 low = DAG.getNode(ISD::AND, dl, MVT::i128,
963 DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
964 DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
965 hi = DAG.getNode(ISD::AND, dl, MVT::i128,
966 DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
967 DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
969 // Shift the Value to store into place. rlow contains the parts that go to
970 // the lower memory chunk, rhi has the parts that go to the upper one.
971 SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
972 rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
973 SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
976 // Merge the old data and the new data and store the results
// Need to convert the vectors here to integers, as 'OR'ing floats asserts.
978 rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
979 DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
980 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
981 rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
982 DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
983 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
985 low = DAG.getStore(the_chain, dl, rlow, basePtr,
987 SN->isVolatile(), SN->isNonTemporal(), 16);
988 hi = DAG.getStore(the_chain, dl, rhi,
989 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
990 DAG.getConstant( 16, PtrVT)),
992 SN->isVolatile(), SN->isNonTemporal(), 16);
993 result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
1000 //! Generate the address of a constant pool entry.
1002 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1003 EVT PtrVT = Op.getValueType();
1004 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1005 const Constant *C = CP->getConstVal();
1006 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
1007 SDValue Zero = DAG.getConstant(0, PtrVT);
1008 const TargetMachine &TM = DAG.getTarget();
1009 // FIXME there is no actual debug info here
1010 DebugLoc dl = Op.getDebugLoc();
1012 if (TM.getRelocationModel() == Reloc::Static) {
1013 if (!ST->usingLargeMem()) {
1014 // Just return the SDValue with the constant pool address in it.
1015 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
1017 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
1018 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
1019 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1023 llvm_unreachable("LowerConstantPool: Relocation model other than static"
1028 //! Alternate entry point for generating the address of a constant pool entry
SDValue
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
1031 return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
1035 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1036 EVT PtrVT = Op.getValueType();
1037 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1038 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1039 SDValue Zero = DAG.getConstant(0, PtrVT);
1040 const TargetMachine &TM = DAG.getTarget();
1041 // FIXME there is no actual debug info here
1042 DebugLoc dl = Op.getDebugLoc();
1044 if (TM.getRelocationModel() == Reloc::Static) {
1045 if (!ST->usingLargeMem()) {
1046 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
1048 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
1049 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
1050 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1054 llvm_unreachable("LowerJumpTable: Relocation model other than static"
1060 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1061 EVT PtrVT = Op.getValueType();
1062 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
1063 const GlobalValue *GV = GSDN->getGlobal();
1064 SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
1065 PtrVT, GSDN->getOffset());
1066 const TargetMachine &TM = DAG.getTarget();
1067 SDValue Zero = DAG.getConstant(0, PtrVT);
1068 // FIXME there is no actual debug info here
1069 DebugLoc dl = Op.getDebugLoc();
1071 if (TM.getRelocationModel() == Reloc::Static) {
1072 if (!ST->usingLargeMem()) {
1073 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
1075 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
1076 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
1077 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1080 report_fatal_error("LowerGlobalAddress: Relocation model other than static"
1088 //! Custom lower double precision floating point constants
1090 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
1091 EVT VT = Op.getValueType();
1092 // FIXME there is no actual debug info here
1093 DebugLoc dl = Op.getDebugLoc();
1095 if (VT == MVT::f64) {
1096 ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
1099 "LowerConstantFP: Node is not ConstantFPSDNode");
1101 uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
1102 SDValue T = DAG.getConstant(dbits, MVT::i64);
1103 SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
1104 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1105 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
1112 SPUTargetLowering::LowerFormalArguments(SDValue Chain,
1113 CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg> &Ins,
1116 DebugLoc dl, SelectionDAG &DAG,
1117 SmallVectorImpl<SDValue> &InVals)
1120 MachineFunction &MF = DAG.getMachineFunction();
1121 MachineFrameInfo *MFI = MF.getFrameInfo();
1122 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1123 SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
1125 unsigned ArgOffset = SPUFrameLowering::minStackSize();
1126 unsigned ArgRegIdx = 0;
1127 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1129 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1131 SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
               *DAG.getContext());
1134 // FIXME: allow for other calling conventions
1135 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
1137 // Add DAG nodes to load the arguments or copy them out of registers.
1138 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
1139 EVT ObjectVT = Ins[ArgNo].VT;
1140 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1142 CCValAssign &VA = ArgLocs[ArgNo];
1144 if (VA.isRegLoc()) {
1145 const TargetRegisterClass *ArgRegClass;
1147 switch (ObjectVT.getSimpleVT().SimpleTy) {
1149 report_fatal_error("LowerFormalArguments Unhandled argument type: " +
1150 Twine(ObjectVT.getEVTString()));
1152 ArgRegClass = &SPU::R8CRegClass;
1155 ArgRegClass = &SPU::R16CRegClass;
1158 ArgRegClass = &SPU::R32CRegClass;
1161 ArgRegClass = &SPU::R64CRegClass;
1164 ArgRegClass = &SPU::GPRCRegClass;
1167 ArgRegClass = &SPU::R32FPRegClass;
1170 ArgRegClass = &SPU::R64FPRegClass;
1178 ArgRegClass = &SPU::VECREGRegClass;
1182 unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
1183 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1184 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
1187 // We need to load the argument to a virtual register if we determined
1188 // above that we ran out of physical registers of the appropriate type
1189 // or we're forced to do vararg
1190 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
1191 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1192 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
1194 ArgOffset += StackSlotSize;
1197 InVals.push_back(ArgVal);
1199 Chain = ArgVal.getOperand(0);
1204 // FIXME: we should be able to query the argument registers from
1205 // tablegen generated code.
1206 static const unsigned ArgRegs[] = {
1207 SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
1208 SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
1209 SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
1210 SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
1211 SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
1212 SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
1213 SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
1214 SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
1215 SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
1216 SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
1217 SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
1219 // size of ArgRegs array
1220 unsigned NumArgRegs = 77;
1222 // We will spill (79-3)+1 registers to the stack
1223 SmallVector<SDValue, 79-3+1> MemOps;
1225 // Create the frame slot
1226 for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
1227 FuncInfo->setVarArgsFrameIndex(
1228 MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
1229 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1230 unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
1231 SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
1232 SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
1234 Chain = Store.getOperand(0);
1235 MemOps.push_back(Store);
1237 // Increment address by stack slot size for the next stored argument
1238 ArgOffset += StackSlotSize;
1240 if (!MemOps.empty())
1241 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1242 &MemOps[0], MemOps.size());
1248 /// isLSAAddress - Return the immediate to use if the specified
/// value is representable as an LSA address.
1250 static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
1251 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1254 int Addr = C->getZExtValue();
1255 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1256 (Addr << 14 >> 14) != Addr)
1257 return 0; // Top 14 bits have to be sext of immediate.
1259 return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
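// Illustrative example (not in the original source): a constant call target
// of 0x100 is word-aligned and fits in the 18-bit signed range enforced
// above, so isLSAAddress returns the word offset 0x40 (0x100 >> 2); a target
// of 0x102 fails the low-bit check and yields 0.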
1263 SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1264 CallingConv::ID CallConv, bool isVarArg,
1266 const SmallVectorImpl<ISD::OutputArg> &Outs,
1267 const SmallVectorImpl<SDValue> &OutVals,
1268 const SmallVectorImpl<ISD::InputArg> &Ins,
1269 DebugLoc dl, SelectionDAG &DAG,
1270 SmallVectorImpl<SDValue> &InVals) const {
1271 // CellSPU target does not yet support tail call optimization.
1274 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
1275 unsigned NumOps = Outs.size();
1276 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1278 SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
               *DAG.getContext());
1281 // FIXME: allow for other calling conventions
1282 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
1284 const unsigned NumArgRegs = ArgLocs.size();
1287 // Handy pointer type
1288 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1290 // Set up a copy of the stack pointer for use loading and storing any
1291 // arguments that may not fit in the registers available for argument
1293 SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
1295 // Figure out which arguments are going to go in registers, and which in
1297 unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
1298 unsigned ArgRegIdx = 0;
1300 // Keep track of registers passing arguments
1301 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
1302 // And the arguments passed on the stack
1303 SmallVector<SDValue, 8> MemOpChains;
1305 for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
1306 SDValue Arg = OutVals[ArgRegIdx];
1307 CCValAssign &VA = ArgLocs[ArgRegIdx];
1309 // PtrOff will be used to store the current argument to the stack if a
1310 // register cannot be found for it.
1311 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1312 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
1314 switch (Arg.getValueType().getSimpleVT().SimpleTy) {
1315 default: llvm_unreachable("Unexpected ValueType for argument!");
1329 if (ArgRegIdx != NumArgRegs) {
1330 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1332 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
1333 MachinePointerInfo(),
1335 ArgOffset += StackSlotSize;
1341 // Accumulate how many bytes are to be pushed on the stack, including the
1342 // linkage area, and parameter passing area. According to the SPU ABI,
1343 // we minimally need space for [LR] and [SP].
1344 unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();
1346 // Insert a call sequence start
1347 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
1350 if (!MemOpChains.empty()) {
1351 // Adjust the stack pointer for the stack arguments.
1352 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1353 &MemOpChains[0], MemOpChains.size());
1356 // Build a sequence of copy-to-reg nodes chained together with token chain
1357 // and flag operands which copy the outgoing args into the appropriate regs.
1359 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1360 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1361 RegsToPass[i].second, InFlag);
1362 InFlag = Chain.getValue(1);
1365 SmallVector<SDValue, 8> Ops;
1366 unsigned CallOpc = SPUISD::CALL;
1368 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1369 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1370 // node so that legalize doesn't hack it.
1371 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1372 const GlobalValue *GV = G->getGlobal();
1373 EVT CalleeVT = Callee.getValueType();
1374 SDValue Zero = DAG.getConstant(0, PtrVT);
1375 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
1377 if (!ST->usingLargeMem()) {
1378 // Turn calls to targets that are defined (i.e., have bodies) into BRSL
1379 // style calls, otherwise, external symbols are BRASL calls. This assumes
1380 // that declared/defined symbols are in the same compilation unit and can
1381 // be reached through PC-relative jumps.
      // This may be an unsafe assumption for JIT and really large compilation
      // units.
1386 if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
      } else {
1389 Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
1392 // "Large memory" mode: Turn all calls into indirect calls with a X-form
1394 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
1396 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1397 EVT CalleeVT = Callee.getValueType();
1398 SDValue Zero = DAG.getConstant(0, PtrVT);
1399 SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
1400 Callee.getValueType());
1402 if (!ST->usingLargeMem()) {
1403 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
1405 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
1407 } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
1408 // If this is an absolute destination address that appears to be a legal
1409 // local store address, use the munged value.
1410 Callee = SDValue(Dest, 0);
1413 Ops.push_back(Chain);
1414 Ops.push_back(Callee);
// Add argument registers to the end of the list so that they are known live
// into the call.
1418 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1419 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1420 RegsToPass[i].second.getValueType()));
1422 if (InFlag.getNode())
1423 Ops.push_back(InFlag);
1424 // Returns a chain and a flag for retval copy to use.
1425 Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
1426 &Ops[0], Ops.size());
1427 InFlag = Chain.getValue(1);
1429 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
1430 DAG.getIntPtrConstant(0, true), InFlag);
1432 InFlag = Chain.getValue(1);
// If the function returns void, just return the chain.
if (Ins.empty()) return Chain;
1438 // Now handle the return value(s)
1439 SmallVector<CCValAssign, 16> RVLocs;
1440 CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(),
1441 RVLocs, *DAG.getContext());
1442 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
1445 // If the call has results, copy the values out of the ret val registers.
1446 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1447 CCValAssign VA = RVLocs[i];
1449 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1451 Chain = Val.getValue(1);
1452 InFlag = Val.getValue(2);
1453 InVals.push_back(Val);
1460 SPUTargetLowering::LowerReturn(SDValue Chain,
1461 CallingConv::ID CallConv, bool isVarArg,
1462 const SmallVectorImpl<ISD::OutputArg> &Outs,
1463 const SmallVectorImpl<SDValue> &OutVals,
1464 DebugLoc dl, SelectionDAG &DAG) const {
1466 SmallVector<CCValAssign, 16> RVLocs;
1467 CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
1468 RVLocs, *DAG.getContext());
1469 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
1471 // If this is the first return lowered for this function, add the regs to the
1472 // liveout set for the function.
1473 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1474 for (unsigned i = 0; i != RVLocs.size(); ++i)
1475 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1480 // Copy the result values into the output registers.
1481 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1482 CCValAssign &VA = RVLocs[i];
1483 assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);
1486 Flag = Chain.getValue(1);
if (Flag.getNode())
  return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1496 //===----------------------------------------------------------------------===//
1497 // Vector related lowering:
1498 //===----------------------------------------------------------------------===//
1500 static ConstantSDNode *
1501 getVecImm(SDNode *N) {
1502 SDValue OpVal(0, 0);
1504 // Check to see if this buildvec has a single non-undef value in its elements.
1505 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1506 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1507 if (OpVal.getNode() == 0)
1508 OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return 0;
  }
  if (OpVal.getNode() != 0) {
1514 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
/// get_vec_u18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
/// constant.
1525 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
1527 if (ConstantSDNode *CN = getVecImm(N)) {
1528 uint64_t Value = CN->getZExtValue();
1529 if (ValueType == MVT::i64) {
1530 uint64_t UValue = CN->getZExtValue();
1531 uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower) return SDValue();
      Value = Value >> 32;
    }
1537 if (Value <= 0x3ffff)
1538 return DAG.getTargetConstant(Value, ValueType);
1544 /// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant.
1547 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
1549 if (ConstantSDNode *CN = getVecImm(N)) {
1550 int64_t Value = CN->getSExtValue();
1551 if (ValueType == MVT::i64) {
1552 uint64_t UValue = CN->getZExtValue();
1553 uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower) return SDValue();
      Value = Value >> 32;
    }
1559 if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
1560 return DAG.getTargetConstant(Value, ValueType);
1567 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 10-bit constant, and if so, return the
/// constant.
1570 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1572 if (ConstantSDNode *CN = getVecImm(N)) {
1573 int64_t Value = CN->getSExtValue();
1574 if (ValueType == MVT::i64) {
1575 uint64_t UValue = CN->getZExtValue();
1576 uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower) return SDValue();
      Value = Value >> 32;
    }
1582 if (isInt<10>(Value))
1583 return DAG.getTargetConstant(Value, ValueType);
1589 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 8-bit constant, and if so, return the
/// constant.
///
/// @note: The incoming vector is v16i8 because that's the only way we can load
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
/// same value.
1596 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1598 if (ConstantSDNode *CN = getVecImm(N)) {
1599 int Value = (int) CN->getZExtValue();
1600 if (ValueType == MVT::i16
1601 && Value <= 0xffff /* truncated from uint64_t */
1602 && ((short) Value >> 8) == ((short) Value & 0xff))
1603 return DAG.getTargetConstant(Value & 0xff, ValueType);
1604 else if (ValueType == MVT::i8
1605 && (Value & 0xff) == Value)
1606 return DAG.getTargetConstant(Value, ValueType);
1612 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1613 /// and the value fits into a signed 16-bit constant, and if so, return the constant.
1615 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1617 if (ConstantSDNode *CN = getVecImm(N)) {
1618 uint64_t Value = CN->getZExtValue();
1619 if ((ValueType == MVT::i32
1620 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1621 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1622 return DAG.getTargetConstant(Value >> 16, ValueType);
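// For example, a splat of 0x12340000 has only its upper halfword set, so the ILHU
// immediate 0x1234 is returned; 0x12340001 fails the mask test.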
1628 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
1629 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1630 if (ConstantSDNode *CN = getVecImm(N)) {
1631 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1637 /// get_v2i64_imm - Catch-all for general 64-bit constant vectors
1638 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1639 if (ConstantSDNode *CN = getVecImm(N)) {
1640 return DAG.getTargetConstant(CN->getZExtValue(), MVT::i64);
1646 //! Lower a BUILD_VECTOR instruction creatively:
1648 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1649 EVT VT = Op.getValueType();
1650 EVT EltVT = VT.getVectorElementType();
1651 DebugLoc dl = Op.getDebugLoc();
1652 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1653 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1654 unsigned minSplatBits = EltVT.getSizeInBits();
1656 if (minSplatBits < 16)
1659 APInt APSplatBits, APSplatUndef;
1660 unsigned SplatBitSize;
1663 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1664 HasAnyUndefs, minSplatBits)
1665 || minSplatBits < SplatBitSize)
1666 return SDValue(); // Wasn't a constant vector or splat exceeded min
1668 uint64_t SplatBits = APSplatBits.getZExtValue();
1670 switch (VT.getSimpleVT().SimpleTy) {
1672 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1673 Twine(VT.getEVTString()));
1676 uint32_t Value32 = uint32_t(SplatBits);
1677 assert(SplatBitSize == 32
1678 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1679 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1680 SDValue T = DAG.getConstant(Value32, MVT::i32);
1681 return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
1682 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
1686 uint64_t f64val = uint64_t(SplatBits);
1687 assert(SplatBitSize == 64
1688 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1689 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1690 SDValue T = DAG.getConstant(f64val, MVT::i64);
1691 return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
1692 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
1696 // 8-bit constants have to be expanded to 16-bits
1697 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1698 SmallVector<SDValue, 8> Ops;
1700 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1701 return DAG.getNode(ISD::BITCAST, dl, VT,
1702 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
1705 unsigned short Value16 = SplatBits;
1706 SDValue T = DAG.getConstant(Value16, EltVT);
1707 SmallVector<SDValue, 8> Ops;
1710 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
1713 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1714 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
1717 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
1727 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1729 uint32_t upper = uint32_t(SplatVal >> 32);
1730 uint32_t lower = uint32_t(SplatVal);
1732 if (upper == lower) {
1733 // Magic constant that can be matched by IL, ILA, et al.
1734 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1735 return DAG.getNode(ISD::BITCAST, dl, OpVT,
1736 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1737 Val, Val, Val, Val));
1739 bool upper_special, lower_special;
1741 // NOTE: This code creates common-case shuffle masks that can be easily
1742 // detected as common expressions. It is not attempting to create highly
1743 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
1745 // Detect if the upper or lower half is a special shuffle mask pattern:
1746 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1747 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
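// These are exactly the word patterns that shufb control bytes can synthesize without
// reading either source operand (10xxxxxx -> 0x00, 110xxxxx -> 0xff, 111xxxxx -> 0x80),
// which is what the mask construction below exploits.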
1749 // Both upper and lower are special, lower to a constant pool load:
1750 if (lower_special && upper_special) {
1751 SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
1752 return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
1753 SplatValCN, SplatValCN);
1758 SmallVector<SDValue, 16> ShufBytes;
1761 // Create lower vector if not a special pattern
1762 if (!lower_special) {
1763 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1764 LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1765 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1766 LO32C, LO32C, LO32C, LO32C));
1769 // Create upper vector if not a special pattern
1770 if (!upper_special) {
1771 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1772 HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1773 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1774 HI32C, HI32C, HI32C, HI32C));
1777 // If either upper or lower are special, then the two input operands are
1778 // the same (basically, one of them is a "don't care")
1784 for (int i = 0; i < 4; ++i) {
1786 for (int j = 0; j < 4; ++j) {
1788 bool process_upper, process_lower;
1790 process_upper = (upper_special && (i & 1) == 0);
1791 process_lower = (lower_special && (i & 1) == 1);
1793 if (process_upper || process_lower) {
1794 if ((process_upper && upper == 0)
1795 || (process_lower && lower == 0))
1797 else if ((process_upper && upper == 0xffffffff)
1798 || (process_lower && lower == 0xffffffff))
1800 else if ((process_upper && upper == 0x80000000)
1801 || (process_lower && lower == 0x80000000))
1802 val |= (j == 0 ? 0xe0 : 0x80);
1804 val |= i * 4 + j + ((i & 1) * 16);
1807 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1810 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1811 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1812 &ShufBytes[0], ShufBytes.size()));
1816 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1817 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1818 /// permutation vector, V3, is monotonically increasing with one "exception"
1819 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1820 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1821 /// In either case, the net result is going to eventually invoke SHUFB to
1822 /// permute/shuffle the bytes from V1 and V2.
1824 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which generate
1825 /// the control word for byte/halfword/word insertion. This takes care of a single
1826 /// element move from V2 into V1.
1828 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
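///
/// For example, with v4i32 operands the mask <0, 1, 6, 3> keeps V1's elements in
/// place and pulls a single element (V2's element 2) into the same slot, so the
/// SHUFFLE_MASK/C*D path applies; a mask such as <3, 2, 1, 0> is neither monotonic
/// nor a rotation and falls through to the general byte-permute path.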
1829 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1830 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1831 SDValue V1 = Op.getOperand(0);
1832 SDValue V2 = Op.getOperand(1);
1833 DebugLoc dl = Op.getDebugLoc();
1835 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1837 // If we have a single element being moved from V2 into V1, this can be handled
1838 // using the C*[DX] compute mask instructions, but the vector elements have
1839 // to be monotonically increasing with one exception element, and the source
1840 // slot of the element to move must be the same as the destination.
1841 EVT VecVT = V1.getValueType();
1842 EVT EltVT = VecVT.getVectorElementType();
1843 unsigned EltsFromV2 = 0;
1844 unsigned V2EltOffset = 0;
1845 unsigned V2EltIdx0 = 0;
1846 unsigned CurrElt = 0;
1847 unsigned MaxElts = VecVT.getVectorNumElements();
1848 unsigned PrevElt = 0;
1849 bool monotonic = true;
1852 EVT maskVT; // which of the c?d instructions to use
1854 if (EltVT == MVT::i8) {
1856 maskVT = MVT::v16i8;
1857 } else if (EltVT == MVT::i16) {
1859 maskVT = MVT::v8i16;
1860 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1862 maskVT = MVT::v4i32;
1863 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1865 maskVT = MVT::v2i64;
1867 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1869 for (unsigned i = 0; i != MaxElts; ++i) {
1870 if (SVN->getMaskElt(i) < 0)
1873 unsigned SrcElt = SVN->getMaskElt(i);
1876 if (SrcElt >= V2EltIdx0) {
1877 // TODO: optimize for the monotonic case when several consecutive
1878 // elements are taken from V2. Do we ever get such a case?
1879 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1880 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1884 } else if (CurrElt != SrcElt) {
1892 if (PrevElt > 0 && SrcElt < MaxElts) {
1893 if ((PrevElt == SrcElt - 1)
1894 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1899 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1900 // First time or after a "wrap around"
1904 // This isn't a rotation, takes elements from vector 2
1910 if (EltsFromV2 == 1 && monotonic) {
1911 // Compute mask and shuffle
1912 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1914 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1915 // R1 ($sp) is used here only because it is guaranteed to have its low-order bits zero
1916 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1917 DAG.getRegister(SPU::R1, PtrVT),
1918 DAG.getConstant(V2EltOffset, MVT::i32));
1919 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1922 // Use shuffle mask in SHUFB synthetic instruction:
1923 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1925 } else if (rotate) {
1928 rotamt *= EltVT.getSizeInBits()/8;
1929 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1930 V1, DAG.getConstant(rotamt, MVT::i16));
1932 // Convert the SHUFFLE_VECTOR mask's input element units to the actual bytes.
1934 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1936 SmallVector<SDValue, 16> ResultMask;
1937 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1938 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1940 for (unsigned j = 0; j < BytesPerElement; ++j)
1941 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1943 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1944 &ResultMask[0], ResultMask.size());
1945 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
1949 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1950 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1951 DebugLoc dl = Op.getDebugLoc();
1953 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1954 // For a constant, build the appropriate constant vector, which will
1955 // eventually simplify to a vector register load.
1957 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1958 SmallVector<SDValue, 16> ConstVecValues;
1962 // Create a constant vector:
1963 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1964 default: llvm_unreachable("Unexpected constant value type in "
1965 "LowerSCALAR_TO_VECTOR");
1966 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1967 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1968 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1969 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1970 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1971 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1974 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1975 for (size_t j = 0; j < n_copies; ++j)
1976 ConstVecValues.push_back(CValue);
1978 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1979 &ConstVecValues[0], ConstVecValues.size());
1981 // Otherwise, copy the value from one register to another:
1982 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1983 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
1990 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
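// On SPU a scalar already occupies the preferred slot of a 128-bit register, so
// PREFSLOT2VEC generally amounts to reinterpreting that register as a vector rather
// than moving any data.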
1997 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1998 EVT VT = Op.getValueType();
1999 SDValue N = Op.getOperand(0);
2000 SDValue Elt = Op.getOperand(1);
2001 DebugLoc dl = Op.getDebugLoc();
2004 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2005 // Constant argument:
2006 int EltNo = (int) C->getZExtValue();
2009 if (VT == MVT::i8 && EltNo >= 16)
2010 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
2011 else if (VT == MVT::i16 && EltNo >= 8)
2012 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
2013 else if (VT == MVT::i32 && EltNo >= 4)
2014 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
2015 else if (VT == MVT::i64 && EltNo >= 2)
2016 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
2018 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
2019 // i32 and i64: Element 0 is the preferred slot
2020 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
2023 // Need to generate shuffle mask and extract:
2024 int prefslot_begin = -1, prefslot_end = -1;
2025 int elt_byte = EltNo * VT.getSizeInBits() / 8;
2027 switch (VT.getSimpleVT().SimpleTy) {
2029 assert(false && "Invalid value type!");
2031 prefslot_begin = prefslot_end = 3;
2035 prefslot_begin = 2; prefslot_end = 3;
2040 prefslot_begin = 0; prefslot_end = 3;
2045 prefslot_begin = 0; prefslot_end = 7;
2050 assert(prefslot_begin != -1 && prefslot_end != -1 &&
2051 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
2053 unsigned int ShufBytes[16] = {
2054 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2056 for (int i = 0; i < 16; ++i) {
2057 // zero fill the upper part of the preferred slot, don't care about the rest
2059 unsigned int mask_val;
2060 if (i <= prefslot_end) {
2062 ((i < prefslot_begin)
2064 : elt_byte + (i - prefslot_begin));
2066 ShufBytes[i] = mask_val;
2068 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
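// For instance, extracting i32 element 1 gives elt_byte == 4, so the loop above
// yields the byte pattern 04 05 06 07 repeated across the quadword (0x04050607 in
// every word of the mask packed below).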
2071 SDValue ShufMask[4];
2072 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
2073 unsigned bidx = i * 4;
2074 unsigned int bits = ((ShufBytes[bidx] << 24) |
2075 (ShufBytes[bidx+1] << 16) |
2076 (ShufBytes[bidx+2] << 8) |
2078 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
2081 SDValue ShufMaskVec =
2082 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2083 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
2085 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2086 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
2087 N, N, ShufMaskVec));
2089 // Variable index: Rotate the requested element into slot 0, then replicate
2090 // slot 0 across the vector
2091 EVT VecVT = N.getValueType();
2092 if (!VecVT.isSimple() || !VecVT.isVector()) {
2093 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit vector type!");
2097 // Make life easier by making sure the index is zero-extended to i32
2098 if (Elt.getValueType() != MVT::i32)
2099 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2101 // Scale the index to a bit/byte shift quantity
2103 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2104 unsigned scaleShift = scaleFactor.logBase2();
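// E.g. for v8i16 there are 8 elements in 16 bytes, so scaleFactor is 2 and
// scaleShift is 1: the variable index is doubled to become the byte offset fed to
// the SHL_BYTES shift below.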
2107 if (scaleShift > 0) {
2108 // Scale the shift factor:
2109 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2110 DAG.getConstant(scaleShift, MVT::i32));
2113 vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
2115 // Replicate the bytes starting at byte 0 across the entire vector (for
2116 // consistency with the notion of a unified register set)
2119 switch (VT.getSimpleVT().SimpleTy) {
2121 report_fatal_error("LowerEXTRACT_VECTOR_ELT(variable): Unhandled vector type");
2125 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2126 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2127 factor, factor, factor, factor);
2131 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2132 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2133 factor, factor, factor, factor);
2138 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2139 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2140 factor, factor, factor, factor);
2145 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2146 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2147 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2148 loFactor, hiFactor, loFactor, hiFactor);
2153 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2154 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2155 vecShift, vecShift, replicate));
2161 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2162 SDValue VecOp = Op.getOperand(0);
2163 SDValue ValOp = Op.getOperand(1);
2164 SDValue IdxOp = Op.getOperand(2);
2165 DebugLoc dl = Op.getDebugLoc();
2166 EVT VT = Op.getValueType();
2167 EVT eltVT = ValOp.getValueType();
2169 // use 0 when the lane to insert to is 'undef'
2171 if (IdxOp.getOpcode() != ISD::UNDEF) {
2172 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2173 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2174 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2177 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2178 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2179 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2180 DAG.getRegister(SPU::R1, PtrVT),
2181 DAG.getConstant(Offset, PtrVT));
2182 // widen the mask when dealing with half vectors
2183 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2184 128/ VT.getVectorElementType().getSizeInBits());
2185 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
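// SHUFFLE_MASK on the $sp-relative address becomes one of the c?d instructions,
// producing an insertion control word for the byte offset; the SHUFB below then
// merges the scalar's preferred slot into the existing vector at that position.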
2188 DAG.getNode(SPUISD::SHUFB, dl, VT,
2189 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2191 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
2196 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2197 const TargetLowering &TLI)
2199 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2200 DebugLoc dl = Op.getDebugLoc();
2201 EVT ShiftVT = TLI.getShiftAmountTy();
2203 assert(Op.getValueType() == MVT::i8);
2206 llvm_unreachable("Unhandled i8 math operator");
2210 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2212 SDValue N1 = Op.getOperand(1);
2213 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2214 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2215 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2216 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2221 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2223 SDValue N1 = Op.getOperand(1);
2224 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2225 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2226 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2227 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2231 SDValue N1 = Op.getOperand(1);
2232 EVT N1VT = N1.getValueType();
2234 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2235 if (!N1VT.bitsEq(ShiftVT)) {
2236 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2239 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2242 // Replicate lower 8-bits into upper 8:
2244 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2245 DAG.getNode(ISD::SHL, dl, MVT::i16,
2246 N0, DAG.getConstant(8, MVT::i32)));
2248 // Truncate back down to i8
2249 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2250 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
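// For a rotate this works because the low byte was replicated into the high byte:
// the 16-bit operation leaves the rotated 8-bit value in the low half, which the
// truncate extracts.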
2254 SDValue N1 = Op.getOperand(1);
2255 EVT N1VT = N1.getValueType();
2257 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2258 if (!N1VT.bitsEq(ShiftVT)) {
2259 unsigned N1Opc = ISD::ZERO_EXTEND;
2261 if (N1.getValueType().bitsGT(ShiftVT))
2262 N1Opc = ISD::TRUNCATE;
2264 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2267 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2268 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2271 SDValue N1 = Op.getOperand(1);
2272 EVT N1VT = N1.getValueType();
2274 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2275 if (!N1VT.bitsEq(ShiftVT)) {
2276 unsigned N1Opc = ISD::SIGN_EXTEND;
2278 if (N1VT.bitsGT(ShiftVT))
2279 N1Opc = ISD::TRUNCATE;
2280 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2283 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2284 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2287 SDValue N1 = Op.getOperand(1);
2289 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2290 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2291 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2292 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2300 //! Lower byte immediate operations for v16i8 vectors:
2302 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2305 EVT VT = Op.getValueType();
2306 DebugLoc dl = Op.getDebugLoc();
2308 ConstVec = Op.getOperand(0);
2309 Arg = Op.getOperand(1);
2310 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2311 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2312 ConstVec = ConstVec.getOperand(0);
2314 ConstVec = Op.getOperand(1);
2315 Arg = Op.getOperand(0);
2316 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2317 ConstVec = ConstVec.getOperand(0);
2322 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2323 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2324 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2326 APInt APSplatBits, APSplatUndef;
2327 unsigned SplatBitSize;
2329 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2331 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2332 HasAnyUndefs, minSplatBits)
2333 && minSplatBits <= SplatBitSize) {
2334 uint64_t SplatBits = APSplatBits.getZExtValue();
2335 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2337 SmallVector<SDValue, 16> tcVec;
2338 tcVec.assign(16, tc);
2339 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2340 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
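// E.g. an OR of a v16i8 value with a splat of 0x3f is rebuilt here with an explicit
// v16i8 build_vector of target constants, which the ORBI (and ANDBI/XORBI) patterns
// can then match directly.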
2344 // These operations (AND, OR, XOR) are legal; they just couldn't be custom
2345 // lowered. Return the operation, rather than a null SDValue.
2349 //! Custom lowering for CTPOP (count population)
2351 Custom lowering code that counts the number of ones in the input
2352 operand. SPU has such an instruction, but it counts the number of
2353 ones per byte, which then have to be accumulated.
2355 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2356 EVT VT = Op.getValueType();
2357 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2358 VT, (128 / VT.getSizeInBits()));
2359 DebugLoc dl = Op.getDebugLoc();
2361 switch (VT.getSimpleVT().SimpleTy) {
2363 assert(false && "Invalid value type!");
2365 SDValue N = Op.getOperand(0);
2366 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2368 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2369 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2371 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2375 MachineFunction &MF = DAG.getMachineFunction();
2376 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2378 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2380 SDValue N = Op.getOperand(0);
2381 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2382 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2383 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2385 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2386 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2388 // CNTB_result becomes the chain to which all of the virtual registers
2389 // CNTB_reg, SUM1_reg become associated:
2390 SDValue CNTB_result =
2391 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2393 SDValue CNTB_rescopy =
2394 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2396 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2398 return DAG.getNode(ISD::AND, dl, MVT::i16,
2399 DAG.getNode(ISD::ADD, dl, MVT::i16,
2400 DAG.getNode(ISD::SRL, dl, MVT::i16,
2407 MachineFunction &MF = DAG.getMachineFunction();
2408 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2410 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2411 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2413 SDValue N = Op.getOperand(0);
2414 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2415 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2416 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2417 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2419 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2420 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2422 // CNTB_result becomes the chain to which all of the virtual registers
2423 // CNTB_reg, SUM1_reg become associated:
2424 SDValue CNTB_result =
2425 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2427 SDValue CNTB_rescopy =
2428 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2431 DAG.getNode(ISD::SRL, dl, MVT::i32,
2432 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2436 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2437 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2439 SDValue Sum1_rescopy =
2440 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2443 DAG.getNode(ISD::SRL, dl, MVT::i32,
2444 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2447 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2448 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2450 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
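// In effect Sum1 = x + (x >> 16) and Sum2 = Sum1 + (Sum1 >> 8) fold the four per-byte
// counts produced by CNTB into the low byte, and the final AND with 0xff discards the
// partial sums that accumulate above it.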
2460 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2462 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2463 All conversions to i64 are expanded to a libcall.
2465 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2466 const SPUTargetLowering &TLI) {
2467 EVT OpVT = Op.getValueType();
2468 SDValue Op0 = Op.getOperand(0);
2469 EVT Op0VT = Op0.getValueType();
2471 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2472 || OpVT == MVT::i64) {
2473 // Convert f32 / f64 to i32 / i64 via libcall.
2475 (Op.getOpcode() == ISD::FP_TO_SINT)
2476 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2477 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2478 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-int conversion!");
2480 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2486 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2488 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2489 All conversions from i64 are expanded to a libcall.
2491 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2492 const SPUTargetLowering &TLI) {
2493 EVT OpVT = Op.getValueType();
2494 SDValue Op0 = Op.getOperand(0);
2495 EVT Op0VT = Op0.getValueType();
2497 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2498 || Op0VT == MVT::i64) {
2499 // Convert i32, i64 to f64 via libcall:
2501 (Op.getOpcode() == ISD::SINT_TO_FP)
2502 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2503 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2504 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected int-to-fp conversion!");
2506 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2512 //! Lower ISD::SETCC
2514 This handles MVT::f64 (double floating point) condition lowering
2516 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2517 const TargetLowering &TLI) {
2518 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2519 DebugLoc dl = Op.getDebugLoc();
2520 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2522 SDValue lhs = Op.getOperand(0);
2523 SDValue rhs = Op.getOperand(1);
2524 EVT lhsVT = lhs.getValueType();
2525 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::f64\n");
2527 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2528 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2529 EVT IntVT(MVT::i64);
2531 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2532 // selected to a NOP:
2533 SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
2535 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2536 DAG.getNode(ISD::SRL, dl, IntVT,
2537 i64lhs, DAG.getConstant(32, MVT::i32)));
2538 SDValue lhsHi32abs =
2539 DAG.getNode(ISD::AND, dl, MVT::i32,
2540 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2542 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2544 // SETO and SETUO only use the lhs operand:
2545 if (CC->get() == ISD::SETO) {
2546 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of the SETUO test below.
2548 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2549 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2550 DAG.getSetCC(dl, ccResultVT,
2551 lhs, DAG.getConstantFP(0.0, lhsVT),
2553 DAG.getConstant(ccResultAllOnes, ccResultVT));
2554 } else if (CC->get() == ISD::SETUO) {
2555 // Evaluates to true if Op0 is [SQ]NaN
2556 return DAG.getNode(ISD::AND, dl, ccResultVT,
2557 DAG.getSetCC(dl, ccResultVT,
2559 DAG.getConstant(0x7ff00000, MVT::i32),
2561 DAG.getSetCC(dl, ccResultVT,
2563 DAG.getConstant(0, MVT::i32),
2567 SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
2569 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2570 DAG.getNode(ISD::SRL, dl, IntVT,
2571 i64rhs, DAG.getConstant(32, MVT::i32)));
2573 // If a value is negative, subtract from the sign magnitude constant:
2574 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2576 // Convert the sign-magnitude representation into 2's complement:
2577 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2578 lhsHi32, DAG.getConstant(31, MVT::i32));
2579 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2581 DAG.getNode(ISD::SELECT, dl, IntVT,
2582 lhsSelectMask, lhsSignMag2TC, i64lhs);
2584 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2585 rhsHi32, DAG.getConstant(31, MVT::i32));
2586 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2588 DAG.getNode(ISD::SELECT, dl, IntVT,
2589 rhsSelectMask, rhsSignMag2TC, i64rhs);
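// For a negative operand the high word's sign bit makes the SRA-by-31 mask all ones,
// so the select picks 0x8000000000000000 - value; this maps IEEE sign-magnitude
// ordering onto ordinary two's-complement ordering for the integer setcc below
// (NaNs are dealt with separately by the ordered/unordered handling that follows).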
2593 switch (CC->get()) {
2596 compareOp = ISD::SETEQ; break;
2599 compareOp = ISD::SETGT; break;
2602 compareOp = ISD::SETGE; break;
2605 compareOp = ISD::SETLT; break;
2608 compareOp = ISD::SETLE; break;
2611 compareOp = ISD::SETNE; break;
2613 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2617 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2618 (ISD::CondCode) compareOp);
2620 if ((CC->get() & 0x8) == 0) {
2621 // Ordered comparison:
2622 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2623 lhs, DAG.getConstantFP(0.0, MVT::f64),
2625 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2626 rhs, DAG.getConstantFP(0.0, MVT::f64),
2628 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2630 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2636 //! Lower ISD::SELECT_CC
2638 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the SELB instruction.
2641 \note Need to revisit this in the future: if the code path through the true
2642 and false value computations is longer than the latency of a branch (6
2643 cycles), then it would be more advantageous to branch and insert a new basic
2644 block and branch on the condition. However, this code does not make that
2645 assumption, given the simplistic uses so far.
2648 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2649 const TargetLowering &TLI) {
2650 EVT VT = Op.getValueType();
2651 SDValue lhs = Op.getOperand(0);
2652 SDValue rhs = Op.getOperand(1);
2653 SDValue trueval = Op.getOperand(2);
2654 SDValue falseval = Op.getOperand(3);
2655 SDValue condition = Op.getOperand(4);
2656 DebugLoc dl = Op.getDebugLoc();
2658 // NOTE: SELB's arguments: $rA, $rB, $mask
2660 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2661 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2662 // condition was true and 0s where the condition was false. Hence, the
2663 // arguments to SELB get reversed.
2665 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2666 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2667 // with another "cannot select select_cc" assert:
2669 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2670 TLI.getSetCCResultType(Op.getValueType()),
2671 lhs, rhs, condition);
2672 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2675 //! Custom lower ISD::TRUNCATE
2676 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2678 // Type to truncate to
2679 EVT VT = Op.getValueType();
2680 MVT simpleVT = VT.getSimpleVT();
2681 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2682 VT, (128 / VT.getSizeInBits()));
2683 DebugLoc dl = Op.getDebugLoc();
2685 // Type to truncate from
2686 SDValue Op0 = Op.getOperand(0);
2687 EVT Op0VT = Op0.getValueType();
2689 if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
2690 // Create shuffle mask, least significant doubleword of quadword
2691 unsigned maskHigh = 0x08090a0b;
2692 unsigned maskLow = 0x0c0d0e0f;
2693 // Use a shuffle to perform the truncation
2694 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2695 DAG.getConstant(maskHigh, MVT::i32),
2696 DAG.getConstant(maskLow, MVT::i32),
2697 DAG.getConstant(maskHigh, MVT::i32),
2698 DAG.getConstant(maskLow, MVT::i32));
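// Each mask byte selects a source byte; 0x08..0x0f name the least significant
// doubleword of the (big-endian) quadword, steering it into bytes 0..7, the i64
// preferred slot, so VEC2PREFSLOT can then read the truncated value out.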
2700 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2701 Op0, Op0, shufMask);
2703 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2706 return SDValue(); // Leave the truncate unmolested
2710 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2711 * algorithm is to duplicate the sign bit using rotmai to generate at
2712 * least one byte full of sign bits. Then propagate the "sign-byte" into
2713 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2715 * @param Op The sext operand
2716 * @param DAG The current DAG
2717 * @return The SDValue with the entire instruction sequence
2719 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2721 DebugLoc dl = Op.getDebugLoc();
2723 // Type to extend to
2724 MVT OpVT = Op.getValueType().getSimpleVT();
2726 // Type to extend from
2727 SDValue Op0 = Op.getOperand(0);
2728 MVT Op0VT = Op0.getValueType().getSimpleVT();
2730 // The type to extend to needs to be a i128 and
2731 // the type to extend from needs to be i64 or i32.
2732 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2733 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2735 // Create shuffle mask
2736 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2737 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2738 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2739 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2740 DAG.getConstant(mask1, MVT::i32),
2741 DAG.getConstant(mask1, MVT::i32),
2742 DAG.getConstant(mask2, MVT::i32),
2743 DAG.getConstant(mask3, MVT::i32));
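// Mask bytes 0x00..0x0f select from the first SHUFB operand (the value, once copied
// into a full GPRC register) and 0x10..0x1f from the second (the sign-bit vector), so
// the 0x10101010 words fill the upper part of the result with sign bytes while mask2
// and mask3 drop the original i64/i32 into the low doubleword.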
2745 // Word wise arithmetic right shift to generate at least one byte
2746 // that contains sign bits.
2747 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2748 SDValue sraVal = DAG.getNode(ISD::SRA,
2751 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2752 DAG.getConstant(31, MVT::i32));
2754 // reinterpret as an i128 (SHUFB requires it). This gets lowered away.
2755 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2757 DAG.getTargetConstant(
2758 SPU::GPRCRegClass.getID(),
2760 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2761 // and the input value into the lower 64 bits.
2762 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2763 extended, sraVal, shufMask);
2764 return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
2767 //! Custom (target-specific) lowering entry point
2769 This is where LLVM's DAG selection process calls to do target-specific lowering of nodes.
2773 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2775 unsigned Opc = (unsigned) Op.getOpcode();
2776 EVT VT = Op.getValueType();
2781 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2782 errs() << "Op.getOpcode() = " << Opc << "\n";
2783 errs() << "*Op.getNode():\n";
2784 Op.getNode()->dump();
2786 llvm_unreachable(0);
2792 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2794 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2795 case ISD::ConstantPool:
2796 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2797 case ISD::GlobalAddress:
2798 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2799 case ISD::JumpTable:
2800 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2801 case ISD::ConstantFP:
2802 return LowerConstantFP(Op, DAG);
2804 // i8, i64 math ops:
2813 return LowerI8Math(Op, DAG, Opc, *this);
2817 case ISD::FP_TO_SINT:
2818 case ISD::FP_TO_UINT:
2819 return LowerFP_TO_INT(Op, DAG, *this);
2821 case ISD::SINT_TO_FP:
2822 case ISD::UINT_TO_FP:
2823 return LowerINT_TO_FP(Op, DAG, *this);
2825 // Vector-related lowering.
2826 case ISD::BUILD_VECTOR:
2827 return LowerBUILD_VECTOR(Op, DAG);
2828 case ISD::SCALAR_TO_VECTOR:
2829 return LowerSCALAR_TO_VECTOR(Op, DAG);
2830 case ISD::VECTOR_SHUFFLE:
2831 return LowerVECTOR_SHUFFLE(Op, DAG);
2832 case ISD::EXTRACT_VECTOR_ELT:
2833 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2834 case ISD::INSERT_VECTOR_ELT:
2835 return LowerINSERT_VECTOR_ELT(Op, DAG);
2837 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2841 return LowerByteImmed(Op, DAG);
2843 // Vector and i8 multiply:
2846 return LowerI8Math(Op, DAG, Opc, *this);
2849 return LowerCTPOP(Op, DAG);
2851 case ISD::SELECT_CC:
2852 return LowerSELECT_CC(Op, DAG, *this);
2855 return LowerSETCC(Op, DAG, *this);
2858 return LowerTRUNCATE(Op, DAG);
2860 case ISD::SIGN_EXTEND:
2861 return LowerSIGN_EXTEND(Op, DAG);
2867 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2868 SmallVectorImpl<SDValue>&Results,
2869 SelectionDAG &DAG) const
2872 unsigned Opc = (unsigned) N->getOpcode();
2873 EVT OpVT = N->getValueType(0);
2877 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2878 errs() << "Op.getOpcode() = " << Opc << "\n";
2879 errs() << "*Op.getNode():\n";
2887 /* Otherwise, return unchanged */
2890 //===----------------------------------------------------------------------===//
2891 // Target Optimization Hooks
2892 //===----------------------------------------------------------------------===//
2895 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2898 TargetMachine &TM = getTargetMachine();
2900 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2901 SelectionDAG &DAG = DCI.DAG;
2902 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2903 EVT NodeVT = N->getValueType(0); // The node's value type
2904 EVT Op0VT = Op0.getValueType(); // The first operand's result
2905 SDValue Result; // Initially, empty result
2906 DebugLoc dl = N->getDebugLoc();
2908 switch (N->getOpcode()) {
2911 SDValue Op1 = N->getOperand(1);
2913 if (Op0.getOpcode() == SPUISD::IndirectAddr
2914 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2915 // Normalize the operands to reduce repeated code
2916 SDValue IndirectArg = Op0, AddArg = Op1;
2918 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2923 if (isa<ConstantSDNode>(AddArg)) {
2924 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2925 SDValue IndOp1 = IndirectArg.getOperand(1);
2927 if (CN0->isNullValue()) {
2928 // (add (SPUindirect <arg>, <arg>), 0) ->
2929 // (SPUindirect <arg>, <arg>)
2931 #if !defined(NDEBUG)
2932 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2934 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2935 << "With: (SPUindirect <arg>, <arg>)\n";
2940 } else if (isa<ConstantSDNode>(IndOp1)) {
2941 // (add (SPUindirect <arg>, <const>), <const>) ->
2942 // (SPUindirect <arg>, <const + const>)
2943 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2944 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2945 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2947 #if !defined(NDEBUG)
2948 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2950 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2951 << "), " << CN0->getSExtValue() << ")\n"
2952 << "With: (SPUindirect <arg>, "
2953 << combinedConst << ")\n";
2957 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2958 IndirectArg, combinedValue);
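// For example, (add (SPUindirect <arg>, 8), 16) re-associates here to
// (SPUindirect <arg>, 24), keeping the whole displacement on the indirect address node.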
2964 case ISD::SIGN_EXTEND:
2965 case ISD::ZERO_EXTEND:
2966 case ISD::ANY_EXTEND: {
2967 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2968 // (any_extend (SPUextract_elt0 <arg>)) ->
2969 // (SPUextract_elt0 <arg>)
2970 // Types must match, however...
2971 #if !defined(NDEBUG)
2972 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2973 errs() << "\nReplace: ";
2975 errs() << "\nWith: ";
2976 Op0.getNode()->dump(&DAG);
2985 case SPUISD::IndirectAddr: {
2986 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2987 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2988 if (CN != 0 && CN->isNullValue()) {
2989 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2990 // (SPUaform <addr>, 0)
2992 DEBUG(errs() << "Replace: ");
2993 DEBUG(N->dump(&DAG));
2994 DEBUG(errs() << "\nWith: ");
2995 DEBUG(Op0.getNode()->dump(&DAG));
2996 DEBUG(errs() << "\n");
3000 } else if (Op0.getOpcode() == ISD::ADD) {
3001 SDValue Op1 = N->getOperand(1);
3002 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
3003 // (SPUindirect (add <arg>, <arg>), 0) ->
3004 // (SPUindirect <arg>, <arg>)
3005 if (CN1->isNullValue()) {
3007 #if !defined(NDEBUG)
3008 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
3010 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
3011 << "With: (SPUindirect <arg>, <arg>)\n";
3015 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
3016 Op0.getOperand(0), Op0.getOperand(1));
3022 case SPUISD::SHL_BITS:
3023 case SPUISD::SHL_BYTES:
3024 case SPUISD::ROTBYTES_LEFT: {
3025 SDValue Op1 = N->getOperand(1);
3027 // Kill degenerate vector shifts:
3028 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3029 if (CN->isNullValue()) {
3035 case SPUISD::PREFSLOT2VEC: {
3036 switch (Op0.getOpcode()) {
3039 case ISD::ANY_EXTEND:
3040 case ISD::ZERO_EXTEND:
3041 case ISD::SIGN_EXTEND: {
3042 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
3044 // but only if the SPUprefslot2vec and <arg> types match.
3045 SDValue Op00 = Op0.getOperand(0);
3046 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
3047 SDValue Op000 = Op00.getOperand(0);
3048 if (Op000.getValueType() == NodeVT) {
3054 case SPUISD::VEC2PREFSLOT: {
3055 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
3057 Result = Op0.getOperand(0);
3065 // Otherwise, return unchanged.
3067 if (Result.getNode()) {
3068 DEBUG(errs() << "\nReplace.SPU: ");
3069 DEBUG(N->dump(&DAG));
3070 DEBUG(errs() << "\nWith: ");
3071 DEBUG(Result.getNode()->dump(&DAG));
3072 DEBUG(errs() << "\n");
3079 //===----------------------------------------------------------------------===//
3080 // Inline Assembly Support
3081 //===----------------------------------------------------------------------===//
3083 /// getConstraintType - Given a constraint letter, return the type of
3084 /// constraint it is for this target.
3085 SPUTargetLowering::ConstraintType
3086 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
3087 if (ConstraintLetter.size() == 1) {
3088 switch (ConstraintLetter[0]) {
3095 return C_RegisterClass;
3098 return TargetLowering::getConstraintType(ConstraintLetter);
3101 /// Examine constraint type and operand type and determine a weight value.
3102 /// This object must already have been set up with the operand type
3103 /// and the current alternative constraint selected.
3104 TargetLowering::ConstraintWeight
3105 SPUTargetLowering::getSingleConstraintMatchWeight(
3106 AsmOperandInfo &info, const char *constraint) const {
3107 ConstraintWeight weight = CW_Invalid;
3108 Value *CallOperandVal = info.CallOperandVal;
3109 // If we don't have a value, we can't do a match,
3110 // but allow it at the lowest weight.
3111 if (CallOperandVal == NULL)
3113 // Look at the constraint type.
3114 switch (*constraint) {
3116 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3118 //FIXME: Seems like the supported constraint letters were just copied
3119 // from PPC, as the following doesn't correspond to the GCC docs.
3120 // I'm leaving it as-is until someone adds the corresponding lowering support.
3127 weight = CW_Register;
3133 std::pair<unsigned, const TargetRegisterClass*>
3134 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3137 if (Constraint.size() == 1) {
3138 // GCC RS6000 Constraint Letters
3139 switch (Constraint[0]) {
3143 return std::make_pair(0U, SPU::R64CRegisterClass);
3144 return std::make_pair(0U, SPU::R32CRegisterClass);
3147 return std::make_pair(0U, SPU::R32FPRegisterClass);
3148 else if (VT == MVT::f64)
3149 return std::make_pair(0U, SPU::R64FPRegisterClass);
3152 return std::make_pair(0U, SPU::GPRCRegisterClass);
3156 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3159 //! Compute used/known bits for a SPU operand
3161 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3165 const SelectionDAG &DAG,
3166 unsigned Depth ) const {
3168 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3170 switch (Op.getOpcode()) {
3172 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3178 case SPUISD::PREFSLOT2VEC:
3179 case SPUISD::LDRESULT:
3180 case SPUISD::VEC2PREFSLOT:
3181 case SPUISD::SHLQUAD_L_BITS:
3182 case SPUISD::SHLQUAD_L_BYTES:
3183 case SPUISD::VEC_ROTL:
3184 case SPUISD::VEC_ROTR:
3185 case SPUISD::ROTBYTES_LEFT:
3186 case SPUISD::SELECT_MASK:
3193 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3194 unsigned Depth) const {
3195 switch (Op.getOpcode()) {
3200 EVT VT = Op.getValueType();
3202 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3205 return VT.getSizeInBits();
3210 // LowerAsmOperandForConstraint
3212 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3213 char ConstraintLetter,
3214 std::vector<SDValue> &Ops,
3215 SelectionDAG &DAG) const {
3216 // Default, for the time being, to the base class handler
3217 TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
3220 /// isLegalAddressImmediate - Return true if the integer value can be used
3221 /// as the offset of the target addressing mode.
3222 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3223 const Type *Ty) const {
3224 // SPU's addresses are 256K:
3225 return (V > -(1 << 18) && V < (1 << 18) - 1);
3228 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3233 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3234 // The SPU target isn't yet aware of offsets.
3238 // can we compare to Imm without writing it into a register?
3239 bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3240 //ceqi, cgti, etc. all take s10 operand
3241 return isInt<10>(Imm);
3245 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3246 const Type * ) const{
3248 // A-form: 18bit absolute address.
3249 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3252 // D-form: reg + 14bit offset
3253 if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3257 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)