1 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file implements the SPUTargetLowering class.
11 //===----------------------------------------------------------------------===//
13 #include "SPUISelLowering.h"
14 #include "SPUTargetMachine.h"
15 #include "SPUFrameLowering.h"
16 #include "SPUMachineFunction.h"
17 #include "llvm/Constants.h"
18 #include "llvm/Function.h"
19 #include "llvm/Intrinsics.h"
20 #include "llvm/CallingConv.h"
21 #include "llvm/Type.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/raw_ostream.h"
38 // Used in getTargetNodeName() below
40 std::map<unsigned, const char *> node_names;
42 // Byte offset of the preferred slot (counted from the MSB)
43 int prefslotOffset(EVT VT) {
45 if (VT==MVT::i1) retval=3;
46 if (VT==MVT::i8) retval=3;
47 if (VT==MVT::i16) retval=2;
52 //! Expand a library call into an actual call DAG node
55 This code is taken from SelectionDAGLegalize, since it is not exposed as
56 part of the LLVM SelectionDAG API.
60 ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
61 bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
62 // The input chain to this libcall is the entry node of the function.
63 // Legalizing the call will automatically add the previous call to the
65 SDValue InChain = DAG.getEntryNode();
67 TargetLowering::ArgListTy Args;
68 TargetLowering::ArgListEntry Entry;
69 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
70 EVT ArgVT = Op.getOperand(i).getValueType();
71 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
72 Entry.Node = Op.getOperand(i);
74 Entry.isSExt = isSigned;
75 Entry.isZExt = !isSigned;
76 Args.push_back(Entry);
78 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
81 // Splice the libcall in wherever FindInputOutputChains tells us to.
83 Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
84 std::pair<SDValue, SDValue> CallInfo =
85 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
86 0, TLI.getLibcallCallingConv(LC),
88 /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
89 Callee, Args, DAG, Op.getDebugLoc());
91 return CallInfo.first;
95 SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
96 : TargetLowering(TM, new TargetLoweringObjectFileELF()),
99 // Use _setjmp/_longjmp instead of setjmp/longjmp.
100 setUseUnderscoreSetJmp(true);
101 setUseUnderscoreLongJmp(true);
103 // Set RTLIB libcall names as used by SPU:
104 setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
106 // Set up the SPU's register classes:
107 addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
108 addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
109 addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
110 addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
111 addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
112 addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
113 addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
115 // SPU has no sign or zero extended loads for i1, i8, i16:
116 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
117 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
118 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
120 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
121 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
123 setTruncStoreAction(MVT::i128, MVT::i64, Expand);
124 setTruncStoreAction(MVT::i128, MVT::i32, Expand);
125 setTruncStoreAction(MVT::i128, MVT::i16, Expand);
126 setTruncStoreAction(MVT::i128, MVT::i8, Expand);
128 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
130 // SPU constant load actions are custom lowered:
131 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
132 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
134 // SPU's loads and stores have to be custom lowered:
135 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
137 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
139 setOperationAction(ISD::LOAD, VT, Custom);
140 setOperationAction(ISD::STORE, VT, Custom);
141 setLoadExtAction(ISD::EXTLOAD, VT, Custom);
142 setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
143 setLoadExtAction(ISD::SEXTLOAD, VT, Custom);
145 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
146 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
147 setTruncStoreAction(VT, StoreVT, Expand);
151 for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
153 MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;
155 setOperationAction(ISD::LOAD, VT, Custom);
156 setOperationAction(ISD::STORE, VT, Custom);
158 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
159 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
160 setTruncStoreAction(VT, StoreVT, Expand);
164 // Expand the jumptable branches
165 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
166 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
168 // Custom lower SELECT_CC for most cases, but expand by default
169 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
170 setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
171 setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
172 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
173 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
175 // SPU has no intrinsics for these particular operations:
176 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
177 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
179 // SPU has no division/remainder instructions
180 setOperationAction(ISD::SREM, MVT::i8, Expand);
181 setOperationAction(ISD::UREM, MVT::i8, Expand);
182 setOperationAction(ISD::SDIV, MVT::i8, Expand);
183 setOperationAction(ISD::UDIV, MVT::i8, Expand);
184 setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
185 setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
186 setOperationAction(ISD::SREM, MVT::i16, Expand);
187 setOperationAction(ISD::UREM, MVT::i16, Expand);
188 setOperationAction(ISD::SDIV, MVT::i16, Expand);
189 setOperationAction(ISD::UDIV, MVT::i16, Expand);
190 setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
191 setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
192 setOperationAction(ISD::SREM, MVT::i32, Expand);
193 setOperationAction(ISD::UREM, MVT::i32, Expand);
194 setOperationAction(ISD::SDIV, MVT::i32, Expand);
195 setOperationAction(ISD::UDIV, MVT::i32, Expand);
196 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
197 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
198 setOperationAction(ISD::SREM, MVT::i64, Expand);
199 setOperationAction(ISD::UREM, MVT::i64, Expand);
200 setOperationAction(ISD::SDIV, MVT::i64, Expand);
201 setOperationAction(ISD::UDIV, MVT::i64, Expand);
202 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
203 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
204 setOperationAction(ISD::SREM, MVT::i128, Expand);
205 setOperationAction(ISD::UREM, MVT::i128, Expand);
206 setOperationAction(ISD::SDIV, MVT::i128, Expand);
207 setOperationAction(ISD::UDIV, MVT::i128, Expand);
208 setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
209 setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
211 // We don't support sin/cos/sqrt/fmod
212 setOperationAction(ISD::FSIN , MVT::f64, Expand);
213 setOperationAction(ISD::FCOS , MVT::f64, Expand);
214 setOperationAction(ISD::FREM , MVT::f64, Expand);
215 setOperationAction(ISD::FSIN , MVT::f32, Expand);
216 setOperationAction(ISD::FCOS , MVT::f32, Expand);
217 setOperationAction(ISD::FREM , MVT::f32, Expand);
219 // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
221 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
222 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
224 setOperationAction(ISD::FMA, MVT::f64, Expand);
225 setOperationAction(ISD::FMA, MVT::f32, Expand);
227 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
228 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
230 // SPU can do rotate right and left, so legalize it... but customize for i8
231 // because instructions don't exist.
233 // FIXME: Change from "expand" to appropriate type once ROTR is supported in
235 setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
236 setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
237 setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);
239 setOperationAction(ISD::ROTL, MVT::i32, Legal);
240 setOperationAction(ISD::ROTL, MVT::i16, Legal);
241 setOperationAction(ISD::ROTL, MVT::i8, Custom);
243 // SPU has no native version of shift left/right for i8
244 setOperationAction(ISD::SHL, MVT::i8, Custom);
245 setOperationAction(ISD::SRL, MVT::i8, Custom);
246 setOperationAction(ISD::SRA, MVT::i8, Custom);
248 // Make these operations legal and handle them during instruction selection:
249 setOperationAction(ISD::SHL, MVT::i64, Legal);
250 setOperationAction(ISD::SRL, MVT::i64, Legal);
251 setOperationAction(ISD::SRA, MVT::i64, Legal);
253 // Custom lower i8, i32 and i64 multiplications
254 setOperationAction(ISD::MUL, MVT::i8, Custom);
255 setOperationAction(ISD::MUL, MVT::i32, Legal);
256 setOperationAction(ISD::MUL, MVT::i64, Legal);
258 // Expand double-width multiplication
259 // FIXME: It would probably be reasonable to support some of these operations
260 setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
261 setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
262 setOperationAction(ISD::MULHU, MVT::i8, Expand);
263 setOperationAction(ISD::MULHS, MVT::i8, Expand);
264 setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
265 setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
266 setOperationAction(ISD::MULHU, MVT::i16, Expand);
267 setOperationAction(ISD::MULHS, MVT::i16, Expand);
268 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
269 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
270 setOperationAction(ISD::MULHU, MVT::i32, Expand);
271 setOperationAction(ISD::MULHS, MVT::i32, Expand);
272 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
273 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
274 setOperationAction(ISD::MULHU, MVT::i64, Expand);
275 setOperationAction(ISD::MULHS, MVT::i64, Expand);
277 // Need to custom handle (some) common i8, i64 math ops
278 setOperationAction(ISD::ADD, MVT::i8, Custom);
279 setOperationAction(ISD::ADD, MVT::i64, Legal);
280 setOperationAction(ISD::SUB, MVT::i8, Custom);
281 setOperationAction(ISD::SUB, MVT::i64, Legal);
283 // SPU does not have BSWAP. It does have i32 support CTLZ.
284 // CTPOP has to be custom lowered.
285 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
286 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
288 setOperationAction(ISD::CTPOP, MVT::i8, Custom);
289 setOperationAction(ISD::CTPOP, MVT::i16, Custom);
290 setOperationAction(ISD::CTPOP, MVT::i32, Custom);
291 setOperationAction(ISD::CTPOP, MVT::i64, Custom);
292 setOperationAction(ISD::CTPOP, MVT::i128, Expand);
294 setOperationAction(ISD::CTTZ , MVT::i8, Expand);
295 setOperationAction(ISD::CTTZ , MVT::i16, Expand);
296 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
297 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
298 setOperationAction(ISD::CTTZ , MVT::i128, Expand);
299 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Expand);
300 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
301 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
302 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
303 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i128, Expand);
305 setOperationAction(ISD::CTLZ , MVT::i8, Promote);
306 setOperationAction(ISD::CTLZ , MVT::i16, Promote);
307 setOperationAction(ISD::CTLZ , MVT::i32, Legal);
308 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
309 setOperationAction(ISD::CTLZ , MVT::i128, Expand);
310 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Expand);
311 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
312 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
313 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
314 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i128, Expand);
316 // SPU has a version of select that implements (a&~c)|(b&c), just like
317 // select ought to work:
318 setOperationAction(ISD::SELECT, MVT::i8, Legal);
319 setOperationAction(ISD::SELECT, MVT::i16, Legal);
320 setOperationAction(ISD::SELECT, MVT::i32, Legal);
321 setOperationAction(ISD::SELECT, MVT::i64, Legal);
323 setOperationAction(ISD::SETCC, MVT::i8, Legal);
324 setOperationAction(ISD::SETCC, MVT::i16, Legal);
325 setOperationAction(ISD::SETCC, MVT::i32, Legal);
326 setOperationAction(ISD::SETCC, MVT::i64, Legal);
327 setOperationAction(ISD::SETCC, MVT::f64, Custom);
329 // Custom lower i128 -> i64 truncates
330 setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);
332 // Custom lower i32/i64 -> i128 sign extend
333 setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);
335 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
336 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
337 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
338 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
339 // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
340 // to expand to a libcall, hence the custom lowering:
341 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
342 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
343 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
344 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
345 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
346 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
348 // FDIV on SPU requires custom lowering
349 setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall
351 // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
352 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
353 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
354 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
355 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
356 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
357 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
358 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
359 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
361 setOperationAction(ISD::BITCAST, MVT::i32, Legal);
362 setOperationAction(ISD::BITCAST, MVT::f32, Legal);
363 setOperationAction(ISD::BITCAST, MVT::i64, Legal);
364 setOperationAction(ISD::BITCAST, MVT::f64, Legal);
366 // We cannot sextinreg(i1). Expand to shifts.
367 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
369 // We want to legalize GlobalAddress and ConstantPool nodes into the
370 // appropriate instructions to materialize the address.
371 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
373 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
375 setOperationAction(ISD::GlobalAddress, VT, Custom);
376 setOperationAction(ISD::ConstantPool, VT, Custom);
377 setOperationAction(ISD::JumpTable, VT, Custom);
380 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
381 setOperationAction(ISD::VASTART , MVT::Other, Custom);
383 // Use the default implementation.
384 setOperationAction(ISD::VAARG , MVT::Other, Expand);
385 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
386 setOperationAction(ISD::VAEND , MVT::Other, Expand);
387 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
388 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
389 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
390 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);
392 // Cell SPU has instructions for converting between i64 and fp.
393 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
394 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
396 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
397 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
399 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
400 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
402 // First set operation action for all vector types to expand. Then we
403 // will selectively turn on ones that can be effectively codegen'd.
404 addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
405 addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
406 addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
407 addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
408 addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
409 addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
411 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
412 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
413 MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
415 // Set operation actions to legal types only.
416 if (!isTypeLegal(VT)) continue;
418 // add/sub are legal for all supported vector VT's.
419 setOperationAction(ISD::ADD, VT, Legal);
420 setOperationAction(ISD::SUB, VT, Legal);
421 // mul has to be custom lowered.
422 setOperationAction(ISD::MUL, VT, Legal);
424 setOperationAction(ISD::AND, VT, Legal);
425 setOperationAction(ISD::OR, VT, Legal);
426 setOperationAction(ISD::XOR, VT, Legal);
427 setOperationAction(ISD::LOAD, VT, Custom);
428 setOperationAction(ISD::SELECT, VT, Legal);
429 setOperationAction(ISD::STORE, VT, Custom);
431 // These operations need to be expanded:
432 setOperationAction(ISD::SDIV, VT, Expand);
433 setOperationAction(ISD::SREM, VT, Expand);
434 setOperationAction(ISD::UDIV, VT, Expand);
435 setOperationAction(ISD::UREM, VT, Expand);
437 // Expand all trunc stores
438 for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
439 j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
440 MVT::SimpleValueType TargetVT = (MVT::SimpleValueType)j;
441 setTruncStoreAction(VT, TargetVT, Expand);
444 // Custom lower build_vector, constant pool spills, insert and
445 // extract vector elements:
446 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
447 setOperationAction(ISD::ConstantPool, VT, Custom);
448 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
449 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
450 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
451 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
454 setOperationAction(ISD::SHL, MVT::v2i64, Expand);
456 setOperationAction(ISD::AND, MVT::v16i8, Custom);
457 setOperationAction(ISD::OR, MVT::v16i8, Custom);
458 setOperationAction(ISD::XOR, MVT::v16i8, Custom);
459 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
461 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
463 setBooleanContents(ZeroOrNegativeOneBooleanContent);
464 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // FIXME: Is this correct?
466 setStackPointerRegisterToSaveRestore(SPU::R1);
468 // We have target-specific dag combine patterns for the following nodes:
469 setTargetDAGCombine(ISD::ADD);
470 setTargetDAGCombine(ISD::ZERO_EXTEND);
471 setTargetDAGCombine(ISD::SIGN_EXTEND);
472 setTargetDAGCombine(ISD::ANY_EXTEND);
474 setMinFunctionAlignment(3);
476 computeRegisterProperties();
478 // Set pre-RA register scheduler default to BURR, which produces slightly
479 // better code than the default (could also be TDRR, but TargetLowering.h
480 // needs a mod to support that model):
481 setSchedulingPreference(Sched::RegPressure);
485 SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
487 if (node_names.empty()) {
488 node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
489 node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
490 node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
491 node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
492 node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
493 node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
494 node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
495 node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
496 node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
497 node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
498 node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
499 node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
500 node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
501 node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
502 node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
503 node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
504 node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
505 node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
506 node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
507 "SPUISD::ROTBYTES_LEFT_BITS";
508 node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
509 node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
510 node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
511 node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
512 node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
515 std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
517 return ((i != node_names.end()) ? i->second : 0);
520 //===----------------------------------------------------------------------===//
521 // Return the Cell SPU's SETCC result type
522 //===----------------------------------------------------------------------===//
524 EVT SPUTargetLowering::getSetCCResultType(EVT VT) const {
525 // i8, i16 and i32 are valid SETCC result types
526 MVT::SimpleValueType retval;
528 switch(VT.getSimpleVT().SimpleTy){
531 retval = MVT::i8; break;
533 retval = MVT::i16; break;
541 //===----------------------------------------------------------------------===//
542 // Calling convention code:
543 //===----------------------------------------------------------------------===//
545 #include "SPUGenCallingConv.inc"
547 //===----------------------------------------------------------------------===//
548 // LowerOperation implementation
549 //===----------------------------------------------------------------------===//
551 /// Custom lower loads for CellSPU
553 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
554 within a 16-byte block, we have to rotate to extract the requested element.
556 For extending loads, we also want to ensure that the following sequence is
557 emitted, e.g. for MVT::f32 extending load to MVT::f64:
561 %2 v16i8,ch = rotate %1
562 %3 v4f8, ch = bitconvert %2
563 %4 f32 = vec2perfslot %3
564 %5 f64 = fp_extend %4
568 LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
569 LoadSDNode *LN = cast<LoadSDNode>(Op);
570 SDValue the_chain = LN->getChain();
571 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
572 EVT InVT = LN->getMemoryVT();
573 EVT OutVT = Op.getValueType();
574 ISD::LoadExtType ExtType = LN->getExtensionType();
575 unsigned alignment = LN->getAlignment();
576 int pso = prefslotOffset(InVT);
577 DebugLoc dl = Op.getDebugLoc();
578 EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
579 (128 / InVT.getSizeInBits()));
582 assert( LN->getAddressingMode() == ISD::UNINDEXED
583 && "we should get only UNINDEXED adresses");
584 // clean aligned loads can be selected as-is
585 if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)
588 // Get pointerinfos to the memory chunk(s) that contain the data to load
589 uint64_t mpi_offset = LN->getPointerInfo().Offset;
590 mpi_offset -= mpi_offset%16;
591 MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
592 MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);
595 SDValue basePtr = LN->getBasePtr();
598 if ((alignment%16) == 0) {
601 // Special cases for a known aligned load to simplify the base pointer
602 // and the rotation amount:
603 if (basePtr.getOpcode() == ISD::ADD
604 && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
605 // Known offset into basePtr
606 int64_t offset = CN->getSExtValue();
607 int64_t rotamt = int64_t((offset & 0xf) - pso);
612 rotate = DAG.getConstant(rotamt, MVT::i16);
614 // Simplify the base pointer for this case:
615 basePtr = basePtr.getOperand(0);
616 if ((offset & ~0xf) > 0) {
617 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
619 DAG.getConstant((offset & ~0xf), PtrVT));
621 } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
622 || (basePtr.getOpcode() == SPUISD::IndirectAddr
623 && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
624 && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
625 // Plain aligned a-form address: rotate into preferred slot
626 // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
627 int64_t rotamt = -pso;
630 rotate = DAG.getConstant(rotamt, MVT::i16);
632 // Offset the rotate amount by the basePtr and the preferred slot
634 int64_t rotamt = -pso;
637 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
639 DAG.getConstant(rotamt, PtrVT));
642 // Unaligned load: must be more pessimistic about addressing modes:
643 if (basePtr.getOpcode() == ISD::ADD) {
644 MachineFunction &MF = DAG.getMachineFunction();
645 MachineRegisterInfo &RegInfo = MF.getRegInfo();
646 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
649 SDValue Op0 = basePtr.getOperand(0);
650 SDValue Op1 = basePtr.getOperand(1);
652 if (isa<ConstantSDNode>(Op1)) {
653 // Convert the (add <ptr>, <const>) to an indirect address contained
654 // in a register. Note that this is done because we need to avoid
655 // creating a 0(reg) d-form address due to the SPU's block loads.
656 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
657 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
658 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
660 // Convert the (add <arg1>, <arg2>) to an indirect address, which
661 // will likely be lowered as a reg(reg) x-form address.
662 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
665 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
667 DAG.getConstant(0, PtrVT));
670 // Offset the rotate amount by the basePtr and the preferred slot
672 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
674 DAG.getConstant(-pso, PtrVT));
677 // Do the load as a i128 to allow possible shifting
678 SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
680 LN->isVolatile(), LN->isNonTemporal(), false, 16);
682 // When the size is not greater than alignment we get all data with just
684 if (alignment >= InVT.getSizeInBits()/8) {
686 the_chain = low.getValue(1);
688 // Rotate into the preferred slot:
689 result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
690 low.getValue(0), rotate);
692 // Convert the loaded v16i8 vector to the appropriate vector type
693 // specified by the operand:
694 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
695 InVT, (128 / InVT.getSizeInBits()));
696 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
697 DAG.getNode(ISD::BITCAST, dl, vecVT, result));
699 // When alignment is less than the size, we might need (known only at
700 // run-time) two loads
701 // TODO: if the memory address is composed only from constants, we have
702 // extra kowledge, and might avoid the second load
704 // storage position offset from lower 16 byte aligned memory chunk
705 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
706 basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
707 // get a registerfull of ones. (this implementation is a workaround: LLVM
708 // cannot handle 128 bit signed int constants)
709 SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
710 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
712 SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
713 DAG.getNode(ISD::ADD, dl, PtrVT,
715 DAG.getConstant(16, PtrVT)),
717 LN->isVolatile(), LN->isNonTemporal(), false,
720 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
723 // Shift the (possible) high part right to compensate the misalignemnt.
724 // if there is no highpart (i.e. value is i64 and offset is 4), this
725 // will zero out the high value.
726 high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
727 DAG.getNode(ISD::SUB, dl, MVT::i32,
728 DAG.getConstant( 16, MVT::i32),
732 // Shift the low similarly
733 // TODO: add SPUISD::SHL_BYTES
734 low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
736 // Merge the two parts
737 result = DAG.getNode(ISD::BITCAST, dl, vecVT,
738 DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
740 if (!InVT.isVector()) {
741 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );
745 // Handle extending loads by extending the scalar result:
746 if (ExtType == ISD::SEXTLOAD) {
747 result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
748 } else if (ExtType == ISD::ZEXTLOAD) {
749 result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
750 } else if (ExtType == ISD::EXTLOAD) {
751 unsigned NewOpc = ISD::ANY_EXTEND;
753 if (OutVT.isFloatingPoint())
754 NewOpc = ISD::FP_EXTEND;
756 result = DAG.getNode(NewOpc, dl, OutVT, result);
759 SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
760 SDValue retops[2] = {
765 result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
766 retops, sizeof(retops) / sizeof(retops[0]));
770 /// Custom lower stores for CellSPU
772 All CellSPU stores are aligned to 16-byte boundaries, so for elements
773 within a 16-byte block, we have to generate a shuffle to insert the
774 requested element into its place, then store the resulting block.
777 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
778 StoreSDNode *SN = cast<StoreSDNode>(Op);
779 SDValue Value = SN->getValue();
780 EVT VT = Value.getValueType();
781 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
782 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
783 DebugLoc dl = Op.getDebugLoc();
784 unsigned alignment = SN->getAlignment();
786 EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
787 (128 / StVT.getSizeInBits()));
788 // Get pointerinfos to the memory chunk(s) that contain the data to load
789 uint64_t mpi_offset = SN->getPointerInfo().Offset;
790 mpi_offset -= mpi_offset%16;
791 MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
792 MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);
796 assert( SN->getAddressingMode() == ISD::UNINDEXED
797 && "we should get only UNINDEXED adresses");
798 // clean aligned loads can be selected as-is
799 if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
802 SDValue alignLoadVec;
803 SDValue basePtr = SN->getBasePtr();
804 SDValue the_chain = SN->getChain();
805 SDValue insertEltOffs;
807 if ((alignment%16) == 0) {
809 // Special cases for a known aligned load to simplify the base pointer
810 // and insertion byte:
811 if (basePtr.getOpcode() == ISD::ADD
812 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
813 // Known offset into basePtr
814 int64_t offset = CN->getSExtValue();
816 // Simplify the base pointer for this case:
817 basePtr = basePtr.getOperand(0);
818 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
820 DAG.getConstant((offset & 0xf), PtrVT));
822 if ((offset & ~0xf) > 0) {
823 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
825 DAG.getConstant((offset & ~0xf), PtrVT));
828 // Otherwise, assume it's at byte 0 of basePtr
829 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
831 DAG.getConstant(0, PtrVT));
832 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
834 DAG.getConstant(0, PtrVT));
837 // Unaligned load: must be more pessimistic about addressing modes:
838 if (basePtr.getOpcode() == ISD::ADD) {
839 MachineFunction &MF = DAG.getMachineFunction();
840 MachineRegisterInfo &RegInfo = MF.getRegInfo();
841 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
844 SDValue Op0 = basePtr.getOperand(0);
845 SDValue Op1 = basePtr.getOperand(1);
847 if (isa<ConstantSDNode>(Op1)) {
848 // Convert the (add <ptr>, <const>) to an indirect address contained
849 // in a register. Note that this is done because we need to avoid
850 // creating a 0(reg) d-form address due to the SPU's block loads.
851 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
852 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
853 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
855 // Convert the (add <arg1>, <arg2>) to an indirect address, which
856 // will likely be lowered as a reg(reg) x-form address.
857 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
860 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
862 DAG.getConstant(0, PtrVT));
865 // Insertion point is solely determined by basePtr's contents
866 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
868 DAG.getConstant(0, PtrVT));
871 // Load the lower part of the memory to which to store.
872 SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
873 lowMemPtr, SN->isVolatile(), SN->isNonTemporal(),
876 // if we don't need to store over the 16 byte boundary, one store suffices
877 if (alignment >= StVT.getSizeInBits()/8) {
879 the_chain = low.getValue(1);
881 LoadSDNode *LN = cast<LoadSDNode>(low);
882 SDValue theValue = SN->getValue();
885 && (theValue.getOpcode() == ISD::AssertZext
886 || theValue.getOpcode() == ISD::AssertSext)) {
887 // Drill down and get the value for zero- and sign-extended
889 theValue = theValue.getOperand(0);
892 // If the base pointer is already a D-form address, then just create
893 // a new D-form address with a slot offset and the orignal base pointer.
894 // Otherwise generate a D-form address with the slot offset relative
895 // to the stack pointer, which is always aligned.
897 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
898 errs() << "CellSPU LowerSTORE: basePtr = ";
899 basePtr.getNode()->dump(&DAG);
904 SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
906 SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
909 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
911 DAG.getNode(ISD::BITCAST, dl,
912 MVT::v4i32, insertEltOp));
914 result = DAG.getStore(the_chain, dl, result, basePtr,
916 LN->isVolatile(), LN->isNonTemporal(),
920 // do the store when it might cross the 16 byte memory access boundary.
922 // TODO issue a warning if SN->isVolatile()== true? This is likely not
923 // what the user wanted.
925 // address offset from nearest lower 16byte alinged address
926 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
928 DAG.getConstant(0xf, MVT::i32));
930 SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
931 DAG.getConstant( 16, MVT::i32),
933 // 16 - sizeof(Value)
934 SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
935 DAG.getConstant( 16, MVT::i32),
936 DAG.getConstant( VT.getSizeInBits()/8,
938 // get a registerfull of ones
939 SDValue ones = DAG.getConstant(-1, MVT::v4i32);
940 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
942 // Create the 128 bit masks that have ones where the data to store is
944 SDValue lowmask, himask;
945 // if the value to store don't fill up the an entire 128 bits, zero
946 // out the last bits of the mask so that only the value we want to store
948 // this is e.g. in the case of store i32, align 2
950 Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
951 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
952 lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
954 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
955 Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
960 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
962 // this will zero, if there are no data that goes to the high quad
963 himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
965 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
968 // Load in the old data and zero out the parts that will be overwritten with
969 // the new data to store.
970 SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
971 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
972 DAG.getConstant( 16, PtrVT)),
974 SN->isVolatile(), SN->isNonTemporal(),
976 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
979 low = DAG.getNode(ISD::AND, dl, MVT::i128,
980 DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
981 DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
982 hi = DAG.getNode(ISD::AND, dl, MVT::i128,
983 DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
984 DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
986 // Shift the Value to store into place. rlow contains the parts that go to
987 // the lower memory chunk, rhi has the parts that go to the upper one.
988 SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
989 rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
990 SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
993 // Merge the old data and the new data and store the results
994 // Need to convert vectors here to integer as 'OR'ing floats assert
995 rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
996 DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
997 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
998 rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
999 DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
1000 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
1002 low = DAG.getStore(the_chain, dl, rlow, basePtr,
1004 SN->isVolatile(), SN->isNonTemporal(), 16);
1005 hi = DAG.getStore(the_chain, dl, rhi,
1006 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
1007 DAG.getConstant( 16, PtrVT)),
1009 SN->isVolatile(), SN->isNonTemporal(), 16);
1010 result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
1017 //! Generate the address of a constant pool entry.
1019 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1020 EVT PtrVT = Op.getValueType();
1021 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1022 const Constant *C = CP->getConstVal();
1023 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
1024 SDValue Zero = DAG.getConstant(0, PtrVT);
1025 const TargetMachine &TM = DAG.getTarget();
1026 // FIXME there is no actual debug info here
1027 DebugLoc dl = Op.getDebugLoc();
1029 if (TM.getRelocationModel() == Reloc::Static) {
1030 if (!ST->usingLargeMem()) {
1031 // Just return the SDValue with the constant pool address in it.
1032 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
1034 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
1035 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
1036 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1040 llvm_unreachable("LowerConstantPool: Relocation model other than static"
1044 //! Alternate entry point for generating the address of a constant pool entry
1046 SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
1047 return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
1051 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1052 EVT PtrVT = Op.getValueType();
1053 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1054 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1055 SDValue Zero = DAG.getConstant(0, PtrVT);
1056 const TargetMachine &TM = DAG.getTarget();
1057 // FIXME there is no actual debug info here
1058 DebugLoc dl = Op.getDebugLoc();
1060 if (TM.getRelocationModel() == Reloc::Static) {
1061 if (!ST->usingLargeMem()) {
1062 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
1064 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
1065 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
1066 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1070 llvm_unreachable("LowerJumpTable: Relocation model other than static"
1075 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1076 EVT PtrVT = Op.getValueType();
1077 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
1078 const GlobalValue *GV = GSDN->getGlobal();
1079 SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
1080 PtrVT, GSDN->getOffset());
1081 const TargetMachine &TM = DAG.getTarget();
1082 SDValue Zero = DAG.getConstant(0, PtrVT);
1083 // FIXME there is no actual debug info here
1084 DebugLoc dl = Op.getDebugLoc();
1086 if (TM.getRelocationModel() == Reloc::Static) {
1087 if (!ST->usingLargeMem()) {
1088 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
1090 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
1091 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
1092 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1095 report_fatal_error("LowerGlobalAddress: Relocation model other than static"
1101 //! Custom lower double precision floating point constants
1103 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
1104 EVT VT = Op.getValueType();
1105 // FIXME there is no actual debug info here
1106 DebugLoc dl = Op.getDebugLoc();
1108 if (VT == MVT::f64) {
1109 ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
1112 "LowerConstantFP: Node is not ConstantFPSDNode");
1114 uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
1115 SDValue T = DAG.getConstant(dbits, MVT::i64);
1116 SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
1117 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1118 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
1125 SPUTargetLowering::LowerFormalArguments(SDValue Chain,
1126 CallingConv::ID CallConv, bool isVarArg,
1127 const SmallVectorImpl<ISD::InputArg>
1129 DebugLoc dl, SelectionDAG &DAG,
1130 SmallVectorImpl<SDValue> &InVals)
1133 MachineFunction &MF = DAG.getMachineFunction();
1134 MachineFrameInfo *MFI = MF.getFrameInfo();
1135 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1136 SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
1138 unsigned ArgOffset = SPUFrameLowering::minStackSize();
1139 unsigned ArgRegIdx = 0;
1140 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1142 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1144 SmallVector<CCValAssign, 16> ArgLocs;
1145 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1146 getTargetMachine(), ArgLocs, *DAG.getContext());
1147 // FIXME: allow for other calling conventions
1148 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
1150 // Add DAG nodes to load the arguments or copy them out of registers.
1151 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
1152 EVT ObjectVT = Ins[ArgNo].VT;
1153 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1155 CCValAssign &VA = ArgLocs[ArgNo];
1157 if (VA.isRegLoc()) {
1158 const TargetRegisterClass *ArgRegClass;
1160 switch (ObjectVT.getSimpleVT().SimpleTy) {
1162 report_fatal_error("LowerFormalArguments Unhandled argument type: " +
1163 Twine(ObjectVT.getEVTString()));
1165 ArgRegClass = &SPU::R8CRegClass;
1168 ArgRegClass = &SPU::R16CRegClass;
1171 ArgRegClass = &SPU::R32CRegClass;
1174 ArgRegClass = &SPU::R64CRegClass;
1177 ArgRegClass = &SPU::GPRCRegClass;
1180 ArgRegClass = &SPU::R32FPRegClass;
1183 ArgRegClass = &SPU::R64FPRegClass;
1191 ArgRegClass = &SPU::VECREGRegClass;
1195 unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
1196 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1197 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
1200 // We need to load the argument to a virtual register if we determined
1201 // above that we ran out of physical registers of the appropriate type
1202 // or we're forced to do vararg
1203 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
1204 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1205 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
1206 false, false, false, 0);
1207 ArgOffset += StackSlotSize;
1210 InVals.push_back(ArgVal);
1212 Chain = ArgVal.getOperand(0);
1217 // FIXME: we should be able to query the argument registers from
1218 // tablegen generated code.
1219 static const unsigned ArgRegs[] = {
1220 SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
1221 SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
1222 SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
1223 SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
1224 SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
1225 SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
1226 SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
1227 SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
1228 SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
1229 SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
1230 SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
1232 // size of ArgRegs array
1233 unsigned NumArgRegs = 77;
1235 // We will spill (79-3)+1 registers to the stack
1236 SmallVector<SDValue, 79-3+1> MemOps;
1238 // Create the frame slot
1239 for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
1240 FuncInfo->setVarArgsFrameIndex(
1241 MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
1242 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1243 unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::VECREGRegClass);
1244 SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
1245 SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
1247 Chain = Store.getOperand(0);
1248 MemOps.push_back(Store);
1250 // Increment address by stack slot size for the next stored argument
1251 ArgOffset += StackSlotSize;
1253 if (!MemOps.empty())
1254 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1255 &MemOps[0], MemOps.size());
1261 /// isLSAAddress - Return the immediate to use if the specified
1262 /// value is representable as a LSA address.
1263 static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
1264 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1267 int Addr = C->getZExtValue();
1268 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1269 (Addr << 14 >> 14) != Addr)
1270 return 0; // Top 14 bits have to be sext of immediate.
1272 return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
1276 SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1277 CallingConv::ID CallConv, bool isVarArg,
1278 bool doesNotRet, bool &isTailCall,
1279 const SmallVectorImpl<ISD::OutputArg> &Outs,
1280 const SmallVectorImpl<SDValue> &OutVals,
1281 const SmallVectorImpl<ISD::InputArg> &Ins,
1282 DebugLoc dl, SelectionDAG &DAG,
1283 SmallVectorImpl<SDValue> &InVals) const {
1284 // CellSPU target does not yet support tail call optimization.
1287 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
1288 unsigned NumOps = Outs.size();
1289 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1291 SmallVector<CCValAssign, 16> ArgLocs;
1292 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1293 getTargetMachine(), ArgLocs, *DAG.getContext());
1294 // FIXME: allow for other calling conventions
1295 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
1297 const unsigned NumArgRegs = ArgLocs.size();
1300 // Handy pointer type
1301 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1303 // Set up a copy of the stack pointer for use loading and storing any
1304 // arguments that may not fit in the registers available for argument
1306 SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
1308 // Figure out which arguments are going to go in registers, and which in
1310 unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
1311 unsigned ArgRegIdx = 0;
1313 // Keep track of registers passing arguments
1314 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
1315 // And the arguments passed on the stack
1316 SmallVector<SDValue, 8> MemOpChains;
1318 for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
1319 SDValue Arg = OutVals[ArgRegIdx];
1320 CCValAssign &VA = ArgLocs[ArgRegIdx];
1322 // PtrOff will be used to store the current argument to the stack if a
1323 // register cannot be found for it.
1324 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1325 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
1327 switch (Arg.getValueType().getSimpleVT().SimpleTy) {
1328 default: llvm_unreachable("Unexpected ValueType for argument!");
1342 if (ArgRegIdx != NumArgRegs) {
1343 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1345 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
1346 MachinePointerInfo(),
1348 ArgOffset += StackSlotSize;
1354 // Accumulate how many bytes are to be pushed on the stack, including the
1355 // linkage area, and parameter passing area. According to the SPU ABI,
1356 // we minimally need space for [LR] and [SP].
1357 unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();
1359 // Insert a call sequence start
1360 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
1363 if (!MemOpChains.empty()) {
1364 // Adjust the stack pointer for the stack arguments.
1365 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1366 &MemOpChains[0], MemOpChains.size());
1369 // Build a sequence of copy-to-reg nodes chained together with token chain
1370 // and flag operands which copy the outgoing args into the appropriate regs.
1372 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1373 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1374 RegsToPass[i].second, InFlag);
1375 InFlag = Chain.getValue(1);
1378 SmallVector<SDValue, 8> Ops;
1379 unsigned CallOpc = SPUISD::CALL;
1381 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1382 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1383 // node so that legalize doesn't hack it.
1384 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1385 const GlobalValue *GV = G->getGlobal();
1386 EVT CalleeVT = Callee.getValueType();
1387 SDValue Zero = DAG.getConstant(0, PtrVT);
1388 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
1390 if (!ST->usingLargeMem()) {
1391 // Turn calls to targets that are defined (i.e., have bodies) into BRSL
1392 // style calls, otherwise, external symbols are BRASL calls. This assumes
1393 // that declared/defined symbols are in the same compilation unit and can
1394 // be reached through PC-relative jumps.
1397 // This may be an unsafe assumption for JIT and really large compilation
1399 if (GV->isDeclaration()) {
1400 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
1402 Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
1405 // "Large memory" mode: Turn all calls into indirect calls with a X-form
1407 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
1409 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1410 EVT CalleeVT = Callee.getValueType();
1411 SDValue Zero = DAG.getConstant(0, PtrVT);
1412 SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
1413 Callee.getValueType());
1415 if (!ST->usingLargeMem()) {
1416 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
1418 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
1420 } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
1421 // If this is an absolute destination address that appears to be a legal
1422 // local store address, use the munged value.
1423 Callee = SDValue(Dest, 0);
1426 Ops.push_back(Chain);
1427 Ops.push_back(Callee);
1429 // Add argument registers to the end of the list so that they are known live
1431 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1432 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1433 RegsToPass[i].second.getValueType()));
1435 if (InFlag.getNode())
1436 Ops.push_back(InFlag);
1437 // Returns a chain and a flag for retval copy to use.
1438 Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
1439 &Ops[0], Ops.size());
1440 InFlag = Chain.getValue(1);
1442 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
1443 DAG.getIntPtrConstant(0, true), InFlag);
1445 InFlag = Chain.getValue(1);
1447 // If the function returns void, just return the chain.
1451 // Now handle the return value(s)
1452 SmallVector<CCValAssign, 16> RVLocs;
1453 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1454 getTargetMachine(), RVLocs, *DAG.getContext());
1455 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
1458 // If the call has results, copy the values out of the ret val registers.
1459 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1460 CCValAssign VA = RVLocs[i];
1462 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1464 Chain = Val.getValue(1);
1465 InFlag = Val.getValue(2);
1466 InVals.push_back(Val);
1473 SPUTargetLowering::LowerReturn(SDValue Chain,
1474 CallingConv::ID CallConv, bool isVarArg,
1475 const SmallVectorImpl<ISD::OutputArg> &Outs,
1476 const SmallVectorImpl<SDValue> &OutVals,
1477 DebugLoc dl, SelectionDAG &DAG) const {
1479 SmallVector<CCValAssign, 16> RVLocs;
1480 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1481 getTargetMachine(), RVLocs, *DAG.getContext());
1482 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
1484 // If this is the first return lowered for this function, add the regs to the
1485 // liveout set for the function.
1486 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1487 for (unsigned i = 0; i != RVLocs.size(); ++i)
1488 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1493 // Copy the result values into the output registers.
1494 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1495 CCValAssign &VA = RVLocs[i];
1496 assert(VA.isRegLoc() && "Can only return in registers!");
1497 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1499 Flag = Chain.getValue(1);
1503 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1505 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1509 //===----------------------------------------------------------------------===//
1510 // Vector related lowering:
1511 //===----------------------------------------------------------------------===//
1513 static ConstantSDNode *
1514 getVecImm(SDNode *N) {
1515 SDValue OpVal(0, 0);
1517 // Check to see if this buildvec has a single non-undef value in its elements.
1518 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1519 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1520 if (OpVal.getNode() == 0)
1521 OpVal = N->getOperand(i);
1522 else if (OpVal != N->getOperand(i))
1526 if (OpVal.getNode() != 0) {
1527 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1535 /// get_vec_i18imm - Test if this vector is a vector filled with the same value
1536 /// and the value fits into an unsigned 18-bit constant, and if so, return the
1538 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
1540 if (ConstantSDNode *CN = getVecImm(N)) {
1541 uint64_t Value = CN->getZExtValue();
1542 if (ValueType == MVT::i64) {
1543 uint64_t UValue = CN->getZExtValue();
1544 uint32_t upper = uint32_t(UValue >> 32);
1545 uint32_t lower = uint32_t(UValue);
1548 Value = Value >> 32;
1550 if (Value <= 0x3ffff)
1551 return DAG.getTargetConstant(Value, ValueType);
1557 /// get_vec_i16imm - Test if this vector is a vector filled with the same value
1558 /// and the value fits into a signed 16-bit constant, and if so, return the
1560 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
1562 if (ConstantSDNode *CN = getVecImm(N)) {
1563 int64_t Value = CN->getSExtValue();
1564 if (ValueType == MVT::i64) {
1565 uint64_t UValue = CN->getZExtValue();
1566 uint32_t upper = uint32_t(UValue >> 32);
1567 uint32_t lower = uint32_t(UValue);
1570 Value = Value >> 32;
1572 if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
1573 return DAG.getTargetConstant(Value, ValueType);
1580 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
1581 /// and the value fits into a signed 10-bit constant, and if so, return the
1583 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1585 if (ConstantSDNode *CN = getVecImm(N)) {
1586 int64_t Value = CN->getSExtValue();
1587 if (ValueType == MVT::i64) {
1588 uint64_t UValue = CN->getZExtValue();
1589 uint32_t upper = uint32_t(UValue >> 32);
1590 uint32_t lower = uint32_t(UValue);
1593 Value = Value >> 32;
1595 if (isInt<10>(Value))
1596 return DAG.getTargetConstant(Value, ValueType);
1602 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
1603 /// and the value fits into a signed 8-bit constant, and if so, return the
1606 /// @note: The incoming vector is v16i8 because that's the only way we can load
1607 /// constant vectors. Thus, we test to see if the upper and lower bytes are the
1609 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1611 if (ConstantSDNode *CN = getVecImm(N)) {
1612 int Value = (int) CN->getZExtValue();
1613 if (ValueType == MVT::i16
1614 && Value <= 0xffff /* truncated from uint64_t */
1615 && ((short) Value >> 8) == ((short) Value & 0xff))
1616 return DAG.getTargetConstant(Value & 0xff, ValueType);
1617 else if (ValueType == MVT::i8
1618 && (Value & 0xff) == Value)
1619 return DAG.getTargetConstant(Value, ValueType);
1625 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1626 /// and the value fits into a signed 16-bit constant, and if so, return the
1628 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1630 if (ConstantSDNode *CN = getVecImm(N)) {
1631 uint64_t Value = CN->getZExtValue();
1632 if ((ValueType == MVT::i32
1633 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1634 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1635 return DAG.getTargetConstant(Value >> 16, ValueType);
1641 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
1642 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1643 if (ConstantSDNode *CN = getVecImm(N)) {
1644 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1650 /// get_v4i32_imm - Catch-all for general 64-bit constant vectors
1651 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1652 if (ConstantSDNode *CN = getVecImm(N)) {
1653 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
1659 //! Lower a BUILD_VECTOR instruction creatively:
1661 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1662 EVT VT = Op.getValueType();
1663 EVT EltVT = VT.getVectorElementType();
1664 DebugLoc dl = Op.getDebugLoc();
1665 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1666 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1667 unsigned minSplatBits = EltVT.getSizeInBits();
1669 if (minSplatBits < 16)
1672 APInt APSplatBits, APSplatUndef;
1673 unsigned SplatBitSize;
1676 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1677 HasAnyUndefs, minSplatBits)
1678 || minSplatBits < SplatBitSize)
1679 return SDValue(); // Wasn't a constant vector or splat exceeded min
1681 uint64_t SplatBits = APSplatBits.getZExtValue();
1683 switch (VT.getSimpleVT().SimpleTy) {
1685 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1686 Twine(VT.getEVTString()));
1689 uint32_t Value32 = uint32_t(SplatBits);
1690 assert(SplatBitSize == 32
1691 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1692 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1693 SDValue T = DAG.getConstant(Value32, MVT::i32);
1694 return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
1695 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
1698 uint64_t f64val = uint64_t(SplatBits);
1699 assert(SplatBitSize == 64
1700 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1701 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1702 SDValue T = DAG.getConstant(f64val, MVT::i64);
1703 return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
1704 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
1707 // 8-bit constants have to be expanded to 16-bits
1708 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1709 SmallVector<SDValue, 8> Ops;
1711 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1712 return DAG.getNode(ISD::BITCAST, dl, VT,
1713 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
1716 unsigned short Value16 = SplatBits;
1717 SDValue T = DAG.getConstant(Value16, EltVT);
1718 SmallVector<SDValue, 8> Ops;
1721 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
1724 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1725 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
1728 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
1736 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1738 uint32_t upper = uint32_t(SplatVal >> 32);
1739 uint32_t lower = uint32_t(SplatVal);
1741 if (upper == lower) {
1742 // Magic constant that can be matched by IL, ILA, et. al.
1743 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1744 return DAG.getNode(ISD::BITCAST, dl, OpVT,
1745 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1746 Val, Val, Val, Val));
1748 bool upper_special, lower_special;
1750 // NOTE: This code creates common-case shuffle masks that can be easily
1751 // detected as common expressions. It is not attempting to create highly
1752 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
1754 // Detect if the upper or lower half is a special shuffle mask pattern:
1755 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1756 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
1758 // Both upper and lower are special, lower to a constant pool load:
1759 if (lower_special && upper_special) {
1760 SDValue UpperVal = DAG.getConstant(upper, MVT::i32);
1761 SDValue LowerVal = DAG.getConstant(lower, MVT::i32);
1762 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1763 UpperVal, LowerVal, UpperVal, LowerVal);
1764 return DAG.getNode(ISD::BITCAST, dl, OpVT, BV);
1769 SmallVector<SDValue, 16> ShufBytes;
1772 // Create lower vector if not a special pattern
1773 if (!lower_special) {
1774 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1775 LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1776 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1777 LO32C, LO32C, LO32C, LO32C));
1780 // Create upper vector if not a special pattern
1781 if (!upper_special) {
1782 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1783 HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1784 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1785 HI32C, HI32C, HI32C, HI32C));
1788 // If either upper or lower are special, then the two input operands are
1789 // the same (basically, one of them is a "don't care")
1795 for (int i = 0; i < 4; ++i) {
1797 for (int j = 0; j < 4; ++j) {
1799 bool process_upper, process_lower;
1801 process_upper = (upper_special && (i & 1) == 0);
1802 process_lower = (lower_special && (i & 1) == 1);
1804 if (process_upper || process_lower) {
1805 if ((process_upper && upper == 0)
1806 || (process_lower && lower == 0))
1808 else if ((process_upper && upper == 0xffffffff)
1809 || (process_lower && lower == 0xffffffff))
1811 else if ((process_upper && upper == 0x80000000)
1812 || (process_lower && lower == 0x80000000))
1813 val |= (j == 0 ? 0xe0 : 0x80);
1815 val |= i * 4 + j + ((i & 1) * 16);
1818 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1821 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1822 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1823 &ShufBytes[0], ShufBytes.size()));
1827 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1828 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1829 /// permutation vector, V3, is monotonically increasing with one "exception"
1830 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1831 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1832 /// In either case, the net result is going to eventually invoke SHUFB to
1833 /// permute/shuffle the bytes from V1 and V2.
1835 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, generate
1836 /// control word for byte/halfword/word insertion. This takes care of a single
1837 /// element move from V2 into V1.
1839 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
1840 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1841 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1842 SDValue V1 = Op.getOperand(0);
1843 SDValue V2 = Op.getOperand(1);
1844 DebugLoc dl = Op.getDebugLoc();
1846 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1848 // If we have a single element being moved from V1 to V2, this can be handled
1849 // using the C*[DX] compute mask instructions, but the vector elements have
1850 // to be monotonically increasing with one exception element, and the source
1851 // slot of the element to move must be the same as the destination.
1852 EVT VecVT = V1.getValueType();
1853 EVT EltVT = VecVT.getVectorElementType();
1854 unsigned EltsFromV2 = 0;
1855 unsigned V2EltOffset = 0;
1856 unsigned V2EltIdx0 = 0;
1857 unsigned CurrElt = 0;
1858 unsigned MaxElts = VecVT.getVectorNumElements();
1859 unsigned PrevElt = 0;
1860 bool monotonic = true;
1863 EVT maskVT; // which of the c?d instructions to use
1865 if (EltVT == MVT::i8) {
1867 maskVT = MVT::v16i8;
1868 } else if (EltVT == MVT::i16) {
1870 maskVT = MVT::v8i16;
1871 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1873 maskVT = MVT::v4i32;
1874 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1876 maskVT = MVT::v2i64;
1878 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1880 for (unsigned i = 0; i != MaxElts; ++i) {
1881 if (SVN->getMaskElt(i) < 0)
1884 unsigned SrcElt = SVN->getMaskElt(i);
1887 if (SrcElt >= V2EltIdx0) {
1888 // TODO: optimize for the monotonic case when several consecutive
1889 // elements are taken form V2. Do we ever get such a case?
1890 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1891 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1895 } else if (CurrElt != SrcElt) {
1903 if (PrevElt > 0 && SrcElt < MaxElts) {
1904 if ((PrevElt == SrcElt - 1)
1905 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1910 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1911 // First time or after a "wrap around"
1915 // This isn't a rotation, takes elements from vector 2
1921 if (EltsFromV2 == 1 && monotonic) {
1922 // Compute mask and shuffle
1923 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1925 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1926 // R1 ($sp) is used here only as it is guaranteed to have last bits zero
1927 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1928 DAG.getRegister(SPU::R1, PtrVT),
1929 DAG.getConstant(V2EltOffset, MVT::i32));
1930 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1933 // Use shuffle mask in SHUFB synthetic instruction:
1934 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1936 } else if (rotate) {
1939 rotamt *= EltVT.getSizeInBits()/8;
1940 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1941 V1, DAG.getConstant(rotamt, MVT::i16));
1943 // Convert the SHUFFLE_VECTOR mask's input element units to the
1945 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1947 SmallVector<SDValue, 16> ResultMask;
1948 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1949 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1951 for (unsigned j = 0; j < BytesPerElement; ++j)
1952 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1954 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1955 &ResultMask[0], ResultMask.size());
1956 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
1960 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1961 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1962 DebugLoc dl = Op.getDebugLoc();
1964 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1965 // For a constant, build the appropriate constant vector, which will
1966 // eventually simplify to a vector register load.
1968 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1969 SmallVector<SDValue, 16> ConstVecValues;
1973 // Create a constant vector:
1974 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1975 default: llvm_unreachable("Unexpected constant value type in "
1976 "LowerSCALAR_TO_VECTOR");
1977 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1978 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1979 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1980 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1981 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1982 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1985 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1986 for (size_t j = 0; j < n_copies; ++j)
1987 ConstVecValues.push_back(CValue);
1989 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1990 &ConstVecValues[0], ConstVecValues.size());
1992 // Otherwise, copy the value from one register to another:
1993 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1994 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
2001 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
2006 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2007 EVT VT = Op.getValueType();
2008 SDValue N = Op.getOperand(0);
2009 SDValue Elt = Op.getOperand(1);
2010 DebugLoc dl = Op.getDebugLoc();
2013 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2014 // Constant argument:
2015 int EltNo = (int) C->getZExtValue();
2018 if (VT == MVT::i8 && EltNo >= 16)
2019 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
2020 else if (VT == MVT::i16 && EltNo >= 8)
2021 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
2022 else if (VT == MVT::i32 && EltNo >= 4)
2023 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
2024 else if (VT == MVT::i64 && EltNo >= 2)
2025 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
2027 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
2028 // i32 and i64: Element 0 is the preferred slot
2029 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
2032 // Need to generate shuffle mask and extract:
2033 int prefslot_begin = -1, prefslot_end = -1;
2034 int elt_byte = EltNo * VT.getSizeInBits() / 8;
2036 switch (VT.getSimpleVT().SimpleTy) {
2037 default: llvm_unreachable("Invalid value type!");
2039 prefslot_begin = prefslot_end = 3;
2043 prefslot_begin = 2; prefslot_end = 3;
2048 prefslot_begin = 0; prefslot_end = 3;
2053 prefslot_begin = 0; prefslot_end = 7;
2058 assert(prefslot_begin != -1 && prefslot_end != -1 &&
2059 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
2061 unsigned int ShufBytes[16] = {
2062 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2064 for (int i = 0; i < 16; ++i) {
2065 // zero fill uppper part of preferred slot, don't care about the
2067 unsigned int mask_val;
2068 if (i <= prefslot_end) {
2070 ((i < prefslot_begin)
2072 : elt_byte + (i - prefslot_begin));
2074 ShufBytes[i] = mask_val;
2076 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
2079 SDValue ShufMask[4];
2080 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
2081 unsigned bidx = i * 4;
2082 unsigned int bits = ((ShufBytes[bidx] << 24) |
2083 (ShufBytes[bidx+1] << 16) |
2084 (ShufBytes[bidx+2] << 8) |
2086 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
2089 SDValue ShufMaskVec =
2090 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2091 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
2093 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2094 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
2095 N, N, ShufMaskVec));
2097 // Variable index: Rotate the requested element into slot 0, then replicate
2098 // slot 0 across the vector
2099 EVT VecVT = N.getValueType();
2100 if (!VecVT.isSimple() || !VecVT.isVector()) {
2101 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
2105 // Make life easier by making sure the index is zero-extended to i32
2106 if (Elt.getValueType() != MVT::i32)
2107 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2109 // Scale the index to a bit/byte shift quantity
2111 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2112 unsigned scaleShift = scaleFactor.logBase2();
2115 if (scaleShift > 0) {
2116 // Scale the shift factor:
2117 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2118 DAG.getConstant(scaleShift, MVT::i32));
2121 vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
2123 // Replicate the bytes starting at byte 0 across the entire vector (for
2124 // consistency with the notion of a unified register set)
2127 switch (VT.getSimpleVT().SimpleTy) {
2129 report_fatal_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
2133 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2134 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2135 factor, factor, factor, factor);
2139 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2140 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2141 factor, factor, factor, factor);
2146 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2147 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2148 factor, factor, factor, factor);
2153 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2154 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2155 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2156 loFactor, hiFactor, loFactor, hiFactor);
2161 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2162 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2163 vecShift, vecShift, replicate));
2169 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2170 SDValue VecOp = Op.getOperand(0);
2171 SDValue ValOp = Op.getOperand(1);
2172 SDValue IdxOp = Op.getOperand(2);
2173 DebugLoc dl = Op.getDebugLoc();
2174 EVT VT = Op.getValueType();
2175 EVT eltVT = ValOp.getValueType();
2177 // use 0 when the lane to insert to is 'undef'
2179 if (IdxOp.getOpcode() != ISD::UNDEF) {
2180 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2181 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2182 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2185 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2186 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2187 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2188 DAG.getRegister(SPU::R1, PtrVT),
2189 DAG.getConstant(Offset, PtrVT));
2190 // widen the mask when dealing with half vectors
2191 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2192 128/ VT.getVectorElementType().getSizeInBits());
2193 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
2196 DAG.getNode(SPUISD::SHUFB, dl, VT,
2197 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2199 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
2204 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2205 const TargetLowering &TLI)
2207 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2208 DebugLoc dl = Op.getDebugLoc();
2209 EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
2211 assert(Op.getValueType() == MVT::i8);
2214 llvm_unreachable("Unhandled i8 math operator");
2216 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2218 SDValue N1 = Op.getOperand(1);
2219 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2220 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2221 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2222 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2227 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2229 SDValue N1 = Op.getOperand(1);
2230 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2231 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2232 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2233 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2237 SDValue N1 = Op.getOperand(1);
2238 EVT N1VT = N1.getValueType();
2240 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2241 if (!N1VT.bitsEq(ShiftVT)) {
2242 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2245 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2248 // Replicate lower 8-bits into upper 8:
2250 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2251 DAG.getNode(ISD::SHL, dl, MVT::i16,
2252 N0, DAG.getConstant(8, MVT::i32)));
2254 // Truncate back down to i8
2255 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2256 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
2260 SDValue N1 = Op.getOperand(1);
2261 EVT N1VT = N1.getValueType();
2263 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2264 if (!N1VT.bitsEq(ShiftVT)) {
2265 unsigned N1Opc = ISD::ZERO_EXTEND;
2267 if (N1.getValueType().bitsGT(ShiftVT))
2268 N1Opc = ISD::TRUNCATE;
2270 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2273 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2274 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2277 SDValue N1 = Op.getOperand(1);
2278 EVT N1VT = N1.getValueType();
2280 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2281 if (!N1VT.bitsEq(ShiftVT)) {
2282 unsigned N1Opc = ISD::SIGN_EXTEND;
2284 if (N1VT.bitsGT(ShiftVT))
2285 N1Opc = ISD::TRUNCATE;
2286 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2289 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2290 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2293 SDValue N1 = Op.getOperand(1);
2295 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2296 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2297 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2298 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2303 //! Lower byte immediate operations for v16i8 vectors:
2305 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2308 EVT VT = Op.getValueType();
2309 DebugLoc dl = Op.getDebugLoc();
2311 ConstVec = Op.getOperand(0);
2312 Arg = Op.getOperand(1);
2313 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2314 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2315 ConstVec = ConstVec.getOperand(0);
2317 ConstVec = Op.getOperand(1);
2318 Arg = Op.getOperand(0);
2319 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2320 ConstVec = ConstVec.getOperand(0);
2325 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2326 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2327 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2329 APInt APSplatBits, APSplatUndef;
2330 unsigned SplatBitSize;
2332 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2334 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2335 HasAnyUndefs, minSplatBits)
2336 && minSplatBits <= SplatBitSize) {
2337 uint64_t SplatBits = APSplatBits.getZExtValue();
2338 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2340 SmallVector<SDValue, 16> tcVec;
2341 tcVec.assign(16, tc);
2342 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2343 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2347 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2348 // lowered. Return the operation, rather than a null SDValue.
2352 //! Custom lowering for CTPOP (count population)
2354 Custom lowering code that counts the number ones in the input
2355 operand. SPU has such an instruction, but it counts the number of
2356 ones per byte, which then have to be accumulated.
2358 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2359 EVT VT = Op.getValueType();
2360 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2361 VT, (128 / VT.getSizeInBits()));
2362 DebugLoc dl = Op.getDebugLoc();
2364 switch (VT.getSimpleVT().SimpleTy) {
2365 default: llvm_unreachable("Invalid value type!");
2367 SDValue N = Op.getOperand(0);
2368 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2370 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2371 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2373 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2377 MachineFunction &MF = DAG.getMachineFunction();
2378 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2380 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2382 SDValue N = Op.getOperand(0);
2383 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2384 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2385 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2387 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2388 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2390 // CNTB_result becomes the chain to which all of the virtual registers
2391 // CNTB_reg, SUM1_reg become associated:
2392 SDValue CNTB_result =
2393 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2395 SDValue CNTB_rescopy =
2396 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2398 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2400 return DAG.getNode(ISD::AND, dl, MVT::i16,
2401 DAG.getNode(ISD::ADD, dl, MVT::i16,
2402 DAG.getNode(ISD::SRL, dl, MVT::i16,
2409 MachineFunction &MF = DAG.getMachineFunction();
2410 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2412 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2413 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2415 SDValue N = Op.getOperand(0);
2416 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2417 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2418 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2419 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2421 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2422 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2424 // CNTB_result becomes the chain to which all of the virtual registers
2425 // CNTB_reg, SUM1_reg become associated:
2426 SDValue CNTB_result =
2427 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2429 SDValue CNTB_rescopy =
2430 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2433 DAG.getNode(ISD::SRL, dl, MVT::i32,
2434 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2438 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2439 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2441 SDValue Sum1_rescopy =
2442 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2445 DAG.getNode(ISD::SRL, dl, MVT::i32,
2446 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2449 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2450 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2452 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2462 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2464 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2465 All conversions to i64 are expanded to a libcall.
2467 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2468 const SPUTargetLowering &TLI) {
2469 EVT OpVT = Op.getValueType();
2470 SDValue Op0 = Op.getOperand(0);
2471 EVT Op0VT = Op0.getValueType();
2473 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2474 || OpVT == MVT::i64) {
2475 // Convert f32 / f64 to i32 / i64 via libcall.
2477 (Op.getOpcode() == ISD::FP_TO_SINT)
2478 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2479 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2480 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2482 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2488 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2490 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2491 All conversions from i64 are expanded to a libcall.
2493 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2494 const SPUTargetLowering &TLI) {
2495 EVT OpVT = Op.getValueType();
2496 SDValue Op0 = Op.getOperand(0);
2497 EVT Op0VT = Op0.getValueType();
2499 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2500 || Op0VT == MVT::i64) {
2501 // Convert i32, i64 to f64 via libcall:
2503 (Op.getOpcode() == ISD::SINT_TO_FP)
2504 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2505 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2506 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd int-to-fp conversion!");
2508 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2514 //! Lower ISD::SETCC
2516 This handles MVT::f64 (double floating point) condition lowering
2518 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2519 const TargetLowering &TLI) {
2520 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2521 DebugLoc dl = Op.getDebugLoc();
2522 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2524 SDValue lhs = Op.getOperand(0);
2525 SDValue rhs = Op.getOperand(1);
2526 EVT lhsVT = lhs.getValueType();
2527 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
2529 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2530 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2531 EVT IntVT(MVT::i64);
2533 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2534 // selected to a NOP:
2535 SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
2537 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2538 DAG.getNode(ISD::SRL, dl, IntVT,
2539 i64lhs, DAG.getConstant(32, MVT::i32)));
2540 SDValue lhsHi32abs =
2541 DAG.getNode(ISD::AND, dl, MVT::i32,
2542 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2544 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2546 // SETO and SETUO only use the lhs operand:
2547 if (CC->get() == ISD::SETO) {
2548 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2550 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2551 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2552 DAG.getSetCC(dl, ccResultVT,
2553 lhs, DAG.getConstantFP(0.0, lhsVT),
2555 DAG.getConstant(ccResultAllOnes, ccResultVT));
2556 } else if (CC->get() == ISD::SETUO) {
2557 // Evaluates to true if Op0 is [SQ]NaN
2558 return DAG.getNode(ISD::AND, dl, ccResultVT,
2559 DAG.getSetCC(dl, ccResultVT,
2561 DAG.getConstant(0x7ff00000, MVT::i32),
2563 DAG.getSetCC(dl, ccResultVT,
2565 DAG.getConstant(0, MVT::i32),
2569 SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
2571 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2572 DAG.getNode(ISD::SRL, dl, IntVT,
2573 i64rhs, DAG.getConstant(32, MVT::i32)));
2575 // If a value is negative, subtract from the sign magnitude constant:
2576 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2578 // Convert the sign-magnitude representation into 2's complement:
2579 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2580 lhsHi32, DAG.getConstant(31, MVT::i32));
2581 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2583 DAG.getNode(ISD::SELECT, dl, IntVT,
2584 lhsSelectMask, lhsSignMag2TC, i64lhs);
2586 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2587 rhsHi32, DAG.getConstant(31, MVT::i32));
2588 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2590 DAG.getNode(ISD::SELECT, dl, IntVT,
2591 rhsSelectMask, rhsSignMag2TC, i64rhs);
2595 switch (CC->get()) {
2598 compareOp = ISD::SETEQ; break;
2601 compareOp = ISD::SETGT; break;
2604 compareOp = ISD::SETGE; break;
2607 compareOp = ISD::SETLT; break;
2610 compareOp = ISD::SETLE; break;
2613 compareOp = ISD::SETNE; break;
2615 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2619 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2620 (ISD::CondCode) compareOp);
2622 if ((CC->get() & 0x8) == 0) {
2623 // Ordered comparison:
2624 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2625 lhs, DAG.getConstantFP(0.0, MVT::f64),
2627 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2628 rhs, DAG.getConstantFP(0.0, MVT::f64),
2630 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2632 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2638 //! Lower ISD::SELECT_CC
2640 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2643 \note Need to revisit this in the future: if the code path through the true
2644 and false value computations is longer than the latency of a branch (6
2645 cycles), then it would be more advantageous to branch and insert a new basic
2646 block and branch on the condition. However, this code does not make that
2647 assumption, given the simplisitc uses so far.
2650 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2651 const TargetLowering &TLI) {
2652 EVT VT = Op.getValueType();
2653 SDValue lhs = Op.getOperand(0);
2654 SDValue rhs = Op.getOperand(1);
2655 SDValue trueval = Op.getOperand(2);
2656 SDValue falseval = Op.getOperand(3);
2657 SDValue condition = Op.getOperand(4);
2658 DebugLoc dl = Op.getDebugLoc();
2660 // NOTE: SELB's arguments: $rA, $rB, $mask
2662 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2663 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2664 // condition was true and 0s where the condition was false. Hence, the
2665 // arguments to SELB get reversed.
2667 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2668 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2669 // with another "cannot select select_cc" assert:
2671 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2672 TLI.getSetCCResultType(Op.getValueType()),
2673 lhs, rhs, condition);
2674 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2677 //! Custom lower ISD::TRUNCATE
2678 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2680 // Type to truncate to
2681 EVT VT = Op.getValueType();
2682 MVT simpleVT = VT.getSimpleVT();
2683 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2684 VT, (128 / VT.getSizeInBits()));
2685 DebugLoc dl = Op.getDebugLoc();
2687 // Type to truncate from
2688 SDValue Op0 = Op.getOperand(0);
2689 EVT Op0VT = Op0.getValueType();
2691 if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
2692 // Create shuffle mask, least significant doubleword of quadword
2693 unsigned maskHigh = 0x08090a0b;
2694 unsigned maskLow = 0x0c0d0e0f;
2695 // Use a shuffle to perform the truncation
2696 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2697 DAG.getConstant(maskHigh, MVT::i32),
2698 DAG.getConstant(maskLow, MVT::i32),
2699 DAG.getConstant(maskHigh, MVT::i32),
2700 DAG.getConstant(maskLow, MVT::i32));
2702 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2703 Op0, Op0, shufMask);
2705 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2708 return SDValue(); // Leave the truncate unmolested
2712 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2713 * algorithm is to duplicate the sign bit using rotmai to generate at
2714 * least one byte full of sign bits. Then propagate the "sign-byte" into
2715 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2717 * @param Op The sext operand
2718 * @param DAG The current DAG
2719 * @return The SDValue with the entire instruction sequence
2721 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2723 DebugLoc dl = Op.getDebugLoc();
2725 // Type to extend to
2726 MVT OpVT = Op.getValueType().getSimpleVT();
2728 // Type to extend from
2729 SDValue Op0 = Op.getOperand(0);
2730 MVT Op0VT = Op0.getValueType().getSimpleVT();
2732 // extend i8 & i16 via i32
2733 if (Op0VT == MVT::i8 || Op0VT == MVT::i16) {
2734 Op0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Op0);
2738 // The type to extend to needs to be a i128 and
2739 // the type to extend from needs to be i64 or i32.
2740 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2741 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2744 // Create shuffle mask
2745 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2746 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2747 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2748 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2749 DAG.getConstant(mask1, MVT::i32),
2750 DAG.getConstant(mask1, MVT::i32),
2751 DAG.getConstant(mask2, MVT::i32),
2752 DAG.getConstant(mask3, MVT::i32));
2754 // Word wise arithmetic right shift to generate at least one byte
2755 // that contains sign bits.
2756 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2757 SDValue sraVal = DAG.getNode(ISD::SRA,
2760 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2761 DAG.getConstant(31, MVT::i32));
2763 // reinterpret as a i128 (SHUFB requires it). This gets lowered away.
2764 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2766 DAG.getTargetConstant(
2767 SPU::GPRCRegClass.getID(),
2769 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2770 // and the input value into the lower 64 bits.
2771 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2772 extended, sraVal, shufMask);
2773 return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
2776 //! Custom (target-specific) lowering entry point
2778 This is where LLVM's DAG selection process calls to do target-specific
2782 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2784 unsigned Opc = (unsigned) Op.getOpcode();
2785 EVT VT = Op.getValueType();
2790 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2791 errs() << "Op.getOpcode() = " << Opc << "\n";
2792 errs() << "*Op.getNode():\n";
2793 Op.getNode()->dump();
2795 llvm_unreachable(0);
2801 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2803 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2804 case ISD::ConstantPool:
2805 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2806 case ISD::GlobalAddress:
2807 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2808 case ISD::JumpTable:
2809 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2810 case ISD::ConstantFP:
2811 return LowerConstantFP(Op, DAG);
2813 // i8, i64 math ops:
2822 return LowerI8Math(Op, DAG, Opc, *this);
2826 case ISD::FP_TO_SINT:
2827 case ISD::FP_TO_UINT:
2828 return LowerFP_TO_INT(Op, DAG, *this);
2830 case ISD::SINT_TO_FP:
2831 case ISD::UINT_TO_FP:
2832 return LowerINT_TO_FP(Op, DAG, *this);
2834 // Vector-related lowering.
2835 case ISD::BUILD_VECTOR:
2836 return LowerBUILD_VECTOR(Op, DAG);
2837 case ISD::SCALAR_TO_VECTOR:
2838 return LowerSCALAR_TO_VECTOR(Op, DAG);
2839 case ISD::VECTOR_SHUFFLE:
2840 return LowerVECTOR_SHUFFLE(Op, DAG);
2841 case ISD::EXTRACT_VECTOR_ELT:
2842 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2843 case ISD::INSERT_VECTOR_ELT:
2844 return LowerINSERT_VECTOR_ELT(Op, DAG);
2846 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2850 return LowerByteImmed(Op, DAG);
2852 // Vector and i8 multiply:
2855 return LowerI8Math(Op, DAG, Opc, *this);
2858 return LowerCTPOP(Op, DAG);
2860 case ISD::SELECT_CC:
2861 return LowerSELECT_CC(Op, DAG, *this);
2864 return LowerSETCC(Op, DAG, *this);
2867 return LowerTRUNCATE(Op, DAG);
2869 case ISD::SIGN_EXTEND:
2870 return LowerSIGN_EXTEND(Op, DAG);
2876 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2877 SmallVectorImpl<SDValue>&Results,
2878 SelectionDAG &DAG) const
2881 unsigned Opc = (unsigned) N->getOpcode();
2882 EVT OpVT = N->getValueType(0);
2886 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2887 errs() << "Op.getOpcode() = " << Opc << "\n";
2888 errs() << "*Op.getNode():\n";
2896 /* Otherwise, return unchanged */
2899 //===----------------------------------------------------------------------===//
2900 // Target Optimization Hooks
2901 //===----------------------------------------------------------------------===//
2904 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2907 TargetMachine &TM = getTargetMachine();
2909 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2910 SelectionDAG &DAG = DCI.DAG;
2911 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2912 EVT NodeVT = N->getValueType(0); // The node's value type
2913 EVT Op0VT = Op0.getValueType(); // The first operand's result
2914 SDValue Result; // Initially, empty result
2915 DebugLoc dl = N->getDebugLoc();
2917 switch (N->getOpcode()) {
2920 SDValue Op1 = N->getOperand(1);
2922 if (Op0.getOpcode() == SPUISD::IndirectAddr
2923 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2924 // Normalize the operands to reduce repeated code
2925 SDValue IndirectArg = Op0, AddArg = Op1;
2927 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2932 if (isa<ConstantSDNode>(AddArg)) {
2933 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2934 SDValue IndOp1 = IndirectArg.getOperand(1);
2936 if (CN0->isNullValue()) {
2937 // (add (SPUindirect <arg>, <arg>), 0) ->
2938 // (SPUindirect <arg>, <arg>)
2940 #if !defined(NDEBUG)
2941 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2943 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2944 << "With: (SPUindirect <arg>, <arg>)\n";
2949 } else if (isa<ConstantSDNode>(IndOp1)) {
2950 // (add (SPUindirect <arg>, <const>), <const>) ->
2951 // (SPUindirect <arg>, <const + const>)
2952 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2953 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2954 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2956 #if !defined(NDEBUG)
2957 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2959 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2960 << "), " << CN0->getSExtValue() << ")\n"
2961 << "With: (SPUindirect <arg>, "
2962 << combinedConst << ")\n";
2966 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2967 IndirectArg, combinedValue);
2973 case ISD::SIGN_EXTEND:
2974 case ISD::ZERO_EXTEND:
2975 case ISD::ANY_EXTEND: {
2976 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2977 // (any_extend (SPUextract_elt0 <arg>)) ->
2978 // (SPUextract_elt0 <arg>)
2979 // Types must match, however...
2980 #if !defined(NDEBUG)
2981 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2982 errs() << "\nReplace: ";
2984 errs() << "\nWith: ";
2985 Op0.getNode()->dump(&DAG);
2994 case SPUISD::IndirectAddr: {
2995 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2996 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2997 if (CN != 0 && CN->isNullValue()) {
2998 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2999 // (SPUaform <addr>, 0)
3001 DEBUG(errs() << "Replace: ");
3002 DEBUG(N->dump(&DAG));
3003 DEBUG(errs() << "\nWith: ");
3004 DEBUG(Op0.getNode()->dump(&DAG));
3005 DEBUG(errs() << "\n");
3009 } else if (Op0.getOpcode() == ISD::ADD) {
3010 SDValue Op1 = N->getOperand(1);
3011 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
3012 // (SPUindirect (add <arg>, <arg>), 0) ->
3013 // (SPUindirect <arg>, <arg>)
3014 if (CN1->isNullValue()) {
3016 #if !defined(NDEBUG)
3017 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
3019 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
3020 << "With: (SPUindirect <arg>, <arg>)\n";
3024 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
3025 Op0.getOperand(0), Op0.getOperand(1));
3031 case SPUISD::SHL_BITS:
3032 case SPUISD::SHL_BYTES:
3033 case SPUISD::ROTBYTES_LEFT: {
3034 SDValue Op1 = N->getOperand(1);
3036 // Kill degenerate vector shifts:
3037 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3038 if (CN->isNullValue()) {
3044 case SPUISD::PREFSLOT2VEC: {
3045 switch (Op0.getOpcode()) {
3048 case ISD::ANY_EXTEND:
3049 case ISD::ZERO_EXTEND:
3050 case ISD::SIGN_EXTEND: {
3051 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
3053 // but only if the SPUprefslot2vec and <arg> types match.
3054 SDValue Op00 = Op0.getOperand(0);
3055 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
3056 SDValue Op000 = Op00.getOperand(0);
3057 if (Op000.getValueType() == NodeVT) {
3063 case SPUISD::VEC2PREFSLOT: {
3064 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
3066 Result = Op0.getOperand(0);
3074 // Otherwise, return unchanged.
3076 if (Result.getNode()) {
3077 DEBUG(errs() << "\nReplace.SPU: ");
3078 DEBUG(N->dump(&DAG));
3079 DEBUG(errs() << "\nWith: ");
3080 DEBUG(Result.getNode()->dump(&DAG));
3081 DEBUG(errs() << "\n");
3088 //===----------------------------------------------------------------------===//
3089 // Inline Assembly Support
3090 //===----------------------------------------------------------------------===//
3092 /// getConstraintType - Given a constraint letter, return the type of
3093 /// constraint it is for this target.
3094 SPUTargetLowering::ConstraintType
3095 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
3096 if (ConstraintLetter.size() == 1) {
3097 switch (ConstraintLetter[0]) {
3104 return C_RegisterClass;
3107 return TargetLowering::getConstraintType(ConstraintLetter);
3110 /// Examine constraint type and operand type and determine a weight value.
3111 /// This object must already have been set up with the operand type
3112 /// and the current alternative constraint selected.
3113 TargetLowering::ConstraintWeight
3114 SPUTargetLowering::getSingleConstraintMatchWeight(
3115 AsmOperandInfo &info, const char *constraint) const {
3116 ConstraintWeight weight = CW_Invalid;
3117 Value *CallOperandVal = info.CallOperandVal;
3118 // If we don't have a value, we can't do a match,
3119 // but allow it at the lowest weight.
3120 if (CallOperandVal == NULL)
3122 // Look at the constraint type.
3123 switch (*constraint) {
3125 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3127 //FIXME: Seems like the supported constraint letters were just copied
3128 // from PPC, as the following doesn't correspond to the GCC docs.
3129 // I'm leaving it so until someone adds the corresponding lowering support.
3136 weight = CW_Register;
3142 std::pair<unsigned, const TargetRegisterClass*>
3143 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3146 if (Constraint.size() == 1) {
3147 // GCC RS6000 Constraint Letters
3148 switch (Constraint[0]) {
3152 return std::make_pair(0U, SPU::R64CRegisterClass);
3153 return std::make_pair(0U, SPU::R32CRegisterClass);
3156 return std::make_pair(0U, SPU::R32FPRegisterClass);
3157 else if (VT == MVT::f64)
3158 return std::make_pair(0U, SPU::R64FPRegisterClass);
3161 return std::make_pair(0U, SPU::GPRCRegisterClass);
3165 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3168 //! Compute used/known bits for a SPU operand
3170 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3174 const SelectionDAG &DAG,
3175 unsigned Depth ) const {
3177 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3179 switch (Op.getOpcode()) {
3181 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3187 case SPUISD::PREFSLOT2VEC:
3188 case SPUISD::LDRESULT:
3189 case SPUISD::VEC2PREFSLOT:
3190 case SPUISD::SHLQUAD_L_BITS:
3191 case SPUISD::SHLQUAD_L_BYTES:
3192 case SPUISD::VEC_ROTL:
3193 case SPUISD::VEC_ROTR:
3194 case SPUISD::ROTBYTES_LEFT:
3195 case SPUISD::SELECT_MASK:
3202 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3203 unsigned Depth) const {
3204 switch (Op.getOpcode()) {
3209 EVT VT = Op.getValueType();
3211 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3214 return VT.getSizeInBits();
3219 // LowerAsmOperandForConstraint
3221 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3222 std::string &Constraint,
3223 std::vector<SDValue> &Ops,
3224 SelectionDAG &DAG) const {
3225 // Default, for the time being, to the base class handler
3226 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3229 /// isLegalAddressImmediate - Return true if the integer value can be used
3230 /// as the offset of the target addressing mode.
3231 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3233 // SPU's addresses are 256K:
3234 return (V > -(1 << 18) && V < (1 << 18) - 1);
3237 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3242 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3243 // The SPU target isn't yet aware of offsets.
3247 // can we compare to Imm without writing it into a register?
3248 bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3249 //ceqi, cgti, etc. all take s10 operand
3250 return isInt<10>(Imm);
3254 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3257 // A-form: 18bit absolute address.
3258 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3261 // D-form: reg + 14bit offset
3262 if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3266 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)