//===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SPUTargetLowering class.
//
//===----------------------------------------------------------------------===//
#include "SPUISelLowering.h"
#include "SPUTargetMachine.h"
#include "SPUFrameLowering.h"
#include "SPUMachineFunction.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Used in getTargetNodeName() below
static std::map<unsigned, const char *> node_names;
// Byte offset of the preferred slot (counted from the MSB)
static int prefslotOffset(EVT VT) {
  int retval = 0;
  if (VT == MVT::i1) retval = 3;
  if (VT == MVT::i8) retval = 3;
  if (VT == MVT::i16) retval = 2;

  return retval;
}
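// Editorial illustration (not part of the original source): a scalar lives
// in the "preferred slot" of a 128-bit SPU register, counted in bytes from
// the MSB:
//
//   byte:    0   1   2   3   4 ... 15
//   i32:   [   the value    ]            -> offset 0
//   i16:           [ value  ]            -> offset 2
//   i8/i1:             [ v  ]            -> offset 3
//
// prefslotOffset(MVT::i16) == 2, and LowerLOAD below folds this offset into
// the byte rotation amounts it computes.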
//! Expand a library call into an actual call DAG node
/*!
 This code is taken from SelectionDAGLegalize, since it is not exposed as
 part of the LLVM SelectionDAG API.
 */
static SDValue
ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
              bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy());

  // Splice the libcall in wherever FindInputOutputChains tells us to.
  Type *RetTy =
    Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG, Op.getDebugLoc());

  return CallInfo.first;
}
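// A minimal usage sketch (editorial illustration, not part of the original
// source): a lowering routine with access to this SPUTargetLowering could
// expand a 64-bit FP division into a libcall roughly as
//
//   SDValue Dummy;
//   SDValue Quot = ExpandLibCall(RTLIB::DIV_F64, Op, DAG,
//                                /*isSigned=*/false, Dummy, TLI);
//
// which calls whatever name is registered for RTLIB::DIV_F64, remapped to
// "__fast_divdf3" in the constructor below.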
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),
    SPUTM(TM) {

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
  // SPU's loads and stores have to be custom lowered:
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setLoadExtAction(ISD::EXTLOAD, VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }

  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }
  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i128, Expand);
  setOperationAction(ISD::UREM, MVT::i128, Expand);
  setOperationAction(ISD::SDIV, MVT::i128, Expand);
  setOperationAction(ISD::UDIV, MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  // for f32 at some point):
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  //        the .td files.
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8, Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);

  // Custom lower i8 multiplications; i32 and i64 multiplications are legal
  setOperationAction(ISD::MUL, MVT::i8, Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD, MVT::i8, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);

  // SPU does not have BSWAP. It does support CTLZ for i32.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Expand);

  setOperationAction(ISD::CTTZ, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i128, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i128, Expand);

  setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  setOperationAction(ISD::CTLZ, MVT::i16, Promote);
  setOperationAction(ISD::CTLZ, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ, MVT::i128, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i128, Expand);
  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8, Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);

  setOperationAction(ISD::SETCC, MVT::i8, Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);

  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall

  // SPU has [U|S]INT_TO_FP for i32->f32, but not for i32->f64 or i64->f64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BITCAST, MVT::i32, Legal);
  setOperationAction(ISD::BITCAST, MVT::f32, Legal);
  setOperationAction(ISD::BITCAST, MVT::i64, Legal);
  setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
  }
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  // Cell SPU has instructions for converting between i64 and fp.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
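  // Editorial illustration (not from the original source): with Expand, the
  // generic legalizer rewrites
  //   (i64 build_pair lo, hi)
  // into roughly
  //   (i64 or (shl (i64 anyext hi), 32), (i64 zext lo))
  // i.e. two extends, a shift and an or, all of which are legal here.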
  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

    // Set operation actions to legal types only.
    if (!isTypeLegal(VT)) continue;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul is legal for the supported vector types as well.
    setOperationAction(ISD::MUL, VT, Legal);

    setOperationAction(ISD::AND, VT, Legal);
    setOperationAction(ISD::OR, VT, Legal);
    setOperationAction(ISD::XOR, VT, Legal);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE, VT, Custom);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Expand all trunc stores
    for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
      MVT::SimpleValueType TargetVT = (MVT::SimpleValueType)j;
      setTruncStoreAction(VT, TargetVT, Expand);
    }

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  }

  setOperationAction(ISD::SHL, MVT::v2i64, Expand);

  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR, MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // FIXME: Is this correct?

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setMinFunctionAlignment(3);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
}
const char *
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
{
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
    node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
            "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
  }

  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);

  return ((i != node_names.end()) ? i->second : 0);
}
//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//
EVT SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  MVT::SimpleValueType retval;

  switch(VT.getSimpleVT().SimpleTy){
    case MVT::i1:
    case MVT::i8:
      retval = MVT::i8; break;
    case MVT::i16:
      retval = MVT::i16; break;
    case MVT::i32:
    default:
      retval = MVT::i32;
  }
  return retval;
}
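// Editorial illustration (not from the original source): combined with the
// ZeroOrNegativeOneBooleanContent setting in the constructor, this means a
// node such as (setcc i16 a, b, setgt) yields an i16 that is either all
// zeros or all ones, which feeds directly into the SPU's selb-style selects.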
//===----------------------------------------------------------------------===//
// Calling convention code:
//===----------------------------------------------------------------------===//

#include "SPUGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//
/// Custom lower loads for CellSPU
/*!
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to rotate to extract the requested element.

 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:

\verbatim
%1  v16i8,ch = load
%2  v16i8,ch = rotate %1
%3  v4f32,ch = bitconvert %2
%4  f32     = vec2prefslot %3
%5  f64     = fp_extend %4
\endverbatim
*/
static SDValue
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();
  EVT OutVT = Op.getValueType();
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  int pso = prefslotOffset(InVT);
  DebugLoc dl = Op.getDebugLoc();
  EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
                                  (128 / InVT.getSizeInBits()));

  assert( LN->getAddressingMode() == ISD::UNINDEXED
          && "we should get only UNINDEXED addresses");
  // clean aligned loads can be selected as-is
  if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)
    return SDValue();

  // Get pointerinfos to the memory chunk(s) that contain the data to load
  uint64_t mpi_offset = LN->getPointerInfo().Offset;
  mpi_offset -= mpi_offset%16;
  MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
  MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);
  SDValue result;
  SDValue basePtr = LN->getBasePtr();
  SDValue rotate;

  if ((alignment%16) == 0) {
    ConstantSDNode *CN;

    // Special cases for a known aligned load to simplify the base pointer
    // and the rotation amount:
    if (basePtr.getOpcode() == ISD::ADD
        && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
      // Known offset into basePtr
      int64_t offset = CN->getSExtValue();
      int64_t rotamt = int64_t((offset & 0xf) - pso);

      if (rotamt < 0)
        rotamt += 16;

      rotate = DAG.getConstant(rotamt, MVT::i16);

      // Simplify the base pointer for this case:
      basePtr = basePtr.getOperand(0);
      if ((offset & ~0xf) > 0) {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant((offset & ~0xf), PtrVT));
      }
    } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
               || (basePtr.getOpcode() == SPUISD::IndirectAddr
                   && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                   && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
      // Plain aligned a-form address: rotate into preferred slot
      // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
      int64_t rotamt = -pso;
      if (rotamt < 0)
        rotamt += 16;
      rotate = DAG.getConstant(rotamt, MVT::i16);
    } else {
      // Offset the rotate amount by the basePtr and the preferred slot
      // byte offset
      int64_t rotamt = -pso;
      if (rotamt < 0)
        rotamt += 16;
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           basePtr,
                           DAG.getConstant(rotamt, PtrVT));
    }
  } else {
    // Unaligned load: must be more pessimistic about addressing modes:
    if (basePtr.getOpcode() == ISD::ADD) {
      MachineFunction &MF = DAG.getMachineFunction();
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
      SDValue Flag;

      SDValue Op0 = basePtr.getOperand(0);
      SDValue Op1 = basePtr.getOperand(1);

      if (isa<ConstantSDNode>(Op1)) {
        // Convert the (add <ptr>, <const>) to an indirect address contained
        // in a register. Note that this is done because we need to avoid
        // creating a 0(reg) d-form address due to the SPU's block loads.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
        basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
      } else {
        // Convert the (add <arg1>, <arg2>) to an indirect address, which
        // will likely be lowered as a reg(reg) x-form address.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
      }
    } else {
      basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                            basePtr,
                            DAG.getConstant(0, PtrVT));
    }

    // Offset the rotate amount by the basePtr and the preferred slot
    // byte offset
    rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                         basePtr,
                         DAG.getConstant(-pso, PtrVT));
  }
  // Do the load as a i128 to allow possible shifting
  SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
                            lowMemPtr,
                            LN->isVolatile(), LN->isNonTemporal(), false, 16);

  // When the size is not greater than alignment we get all data with just
  // one load
  if (alignment >= InVT.getSizeInBits()/8) {
    // Update the chain
    the_chain = low.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
                         low.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BITCAST, dl, vecVT, result));
  }
  // When alignment is less than the size, we might need (known only at
  // run-time) two loads.
  // TODO: if the memory address is composed only from constants, we have
  // extra knowledge, and might avoid the second load
  else {
    // storage position offset from lower 16 byte aligned memory chunk
    SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
                                 basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
    // get a register full of ones. (this implementation is a workaround: LLVM
    // cannot handle 128 bit signed int constants)
    SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
    ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);

    SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
                               DAG.getNode(ISD::ADD, dl, PtrVT,
                                           basePtr,
                                           DAG.getConstant(16, PtrVT)),
                               highMemPtr,
                               LN->isVolatile(), LN->isNonTemporal(), false,
                               16);

    the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
                            high.getValue(1));

    // Shift the (possible) high part right to compensate the misalignment.
    // If there is no high part (i.e. the value is i64 and the offset is 4),
    // this will zero out the high value.
    high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
                       DAG.getNode(ISD::SUB, dl, MVT::i32,
                                   DAG.getConstant( 16, MVT::i32),
                                   offset));

    // Shift the low similarly
    low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );

    // Merge the two parts
    result = DAG.getNode(ISD::BITCAST, dl, vecVT,
                         DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
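    // Editorial worked example (not from the original source): an i32 load
    // whose address has low nibble 0xe gives offset == 14, so two bytes of
    // the value sit in the low quadword and two in the high one:
    //   low  = SHL_BYTES(low, 14)      ;; bytes 14..15 move to bytes 0..1
    //   high = SRL_BYTES(high, 16-14)  ;; bytes 0..1  move to bytes 2..3
    // and the OR above leaves the i32 in bytes 0..3, its preferred slot.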
    if (!InVT.isVector()) {
      result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );
    }
  }

  // Handle extending loads by extending the scalar result:
  if (ExtType == ISD::SEXTLOAD) {
    result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
  } else if (ExtType == ISD::ZEXTLOAD) {
    result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
  } else if (ExtType == ISD::EXTLOAD) {
    unsigned NewOpc = ISD::ANY_EXTEND;

    if (OutVT.isFloatingPoint())
      NewOpc = ISD::FP_EXTEND;

    result = DAG.getNode(NewOpc, dl, OutVT, result);
  }

  SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
  SDValue retops[2] = {
    result,
    the_chain
  };

  result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                       retops, sizeof(retops) / sizeof(retops[0]));
  return result;
}
/// Custom lower stores for CellSPU
/*!
 All CellSPU stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to generate a shuffle to insert the
 requested element into its place, then store the resulting block.
 */
static SDValue
LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  StoreSDNode *SN = cast<StoreSDNode>(Op);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();
  EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned alignment = SN->getAlignment();
  SDValue result;
  EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
                                  (128 / StVT.getSizeInBits()));
  // Get pointerinfos to the memory chunk(s) that contain the data to store
  uint64_t mpi_offset = SN->getPointerInfo().Offset;
  mpi_offset -= mpi_offset%16;
  MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
  MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);

  assert( SN->getAddressingMode() == ISD::UNINDEXED
          && "we should get only UNINDEXED addresses");
  // clean aligned stores can be selected as-is
  if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
    return SDValue();

  SDValue alignLoadVec;
  SDValue basePtr = SN->getBasePtr();
  SDValue the_chain = SN->getChain();
  SDValue insertEltOffs;
  if ((alignment%16) == 0) {
    ConstantSDNode *CN;

    // Special cases for a known aligned store to simplify the base pointer
    // and insertion byte:
    if (basePtr.getOpcode() == ISD::ADD
        && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
      // Known offset into basePtr
      int64_t offset = CN->getSExtValue();

      // Simplify the base pointer for this case:
      basePtr = basePtr.getOperand(0);
      insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                  basePtr,
                                  DAG.getConstant((offset & 0xf), PtrVT));

      if ((offset & ~0xf) > 0) {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant((offset & ~0xf), PtrVT));
      }
    } else {
      // Otherwise, assume it's at byte 0 of basePtr
      insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                  basePtr,
                                  DAG.getConstant(0, PtrVT));
      basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                            basePtr,
                            DAG.getConstant(0, PtrVT));
    }
  } else {
    // Unaligned store: must be more pessimistic about addressing modes:
    if (basePtr.getOpcode() == ISD::ADD) {
      MachineFunction &MF = DAG.getMachineFunction();
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
      SDValue Flag;

      SDValue Op0 = basePtr.getOperand(0);
      SDValue Op1 = basePtr.getOperand(1);

      if (isa<ConstantSDNode>(Op1)) {
        // Convert the (add <ptr>, <const>) to an indirect address contained
        // in a register. Note that this is done because we need to avoid
        // creating a 0(reg) d-form address due to the SPU's block loads.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
        basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
      } else {
        // Convert the (add <arg1>, <arg2>) to an indirect address, which
        // will likely be lowered as a reg(reg) x-form address.
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
      }
    } else {
      basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                            basePtr,
                            DAG.getConstant(0, PtrVT));
    }

    // Insertion point is solely determined by basePtr's contents
    insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
                                basePtr,
                                DAG.getConstant(0, PtrVT));
  }
  // Load the lower part of the memory to which to store.
  SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
                            lowMemPtr, SN->isVolatile(), SN->isNonTemporal(),
                            false, 16);

  // if we don't need to store over the 16 byte boundary, one store suffices
  if (alignment >= StVT.getSizeInBits()/8) {
    // Update the chain
    the_chain = low.getValue(1);

    LoadSDNode *LN = cast<LoadSDNode>(low);
    SDValue theValue = SN->getValue();

    if (StVT != VT
        && (theValue.getOpcode() == ISD::AssertZext
            || theValue.getOpcode() == ISD::AssertSext)) {
      // Drill down and get the value for zero- and sign-extended
      // quantities
      theValue = theValue.getOperand(0);
    }

    // If the base pointer is already a D-form address, then just create
    // a new D-form address with a slot offset and the original base pointer.
    // Otherwise generate a D-form address with the slot offset relative
    // to the stack pointer, which is always aligned.
#if !defined(NDEBUG)
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      errs() << "CellSPU LowerSTORE: basePtr = ";
      basePtr.getNode()->dump(&DAG);
      errs() << "\n";
    }
#endif

    SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
                                      insertEltOffs);
    SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
                                      theValue);

    result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                         vectorizeOp, low,
                         DAG.getNode(ISD::BITCAST, dl,
                                     MVT::v4i32, insertEltOp));

    result = DAG.getStore(the_chain, dl, result, basePtr,
                          lowMemPtr,
                          LN->isVolatile(), LN->isNonTemporal(),
                          16);

  }
  // do the store when it might cross the 16 byte memory access boundary.
  else {
    // TODO issue a warning if SN->isVolatile() == true? This is likely not
    // what the user wanted.

    // address offset from nearest lower 16 byte aligned address
    SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
                                 basePtr,
                                 DAG.getConstant(0xf, MVT::i32));
    // 16 - offset
    SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                       DAG.getConstant( 16, MVT::i32),
                                       offset);
    // 16 - sizeof(Value)
    SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                  DAG.getConstant( 16, MVT::i32),
                                  DAG.getConstant( VT.getSizeInBits()/8,
                                                   MVT::i32));
    // get a register full of ones
    SDValue ones = DAG.getConstant(-1, MVT::v4i32);
    ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);

    // Create the 128 bit masks that have ones where the data to store is
    // contained
    SDValue lowmask, himask;
    // if the value to store doesn't fill up an entire 128 bits, zero
    // out the last bits of the mask so that only the value we want to store
    // is covered by the mask.
    // this is e.g. in the case of store i32, align 2
    if (!VT.isVector()) {
      Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
      lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
      lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
                            surplus);
      Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
      Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
    } else {
      lowmask = ones;
      Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
    }

    // this will be zero if there is no data that goes to the high quadword
    himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
                         offset_compl);
    lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
                          offset);
    // Load in the old data and zero out the parts that will be overwritten with
    // the new data to store.
    SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
                             DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
                                         DAG.getConstant( 16, PtrVT)),
                             highMemPtr,
                             SN->isVolatile(), SN->isNonTemporal(),
                             false, 16);
    the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
                            hi.getValue(1));

    low = DAG.getNode(ISD::AND, dl, MVT::i128,
                      DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
                      DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
    hi = DAG.getNode(ISD::AND, dl, MVT::i128,
                     DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
                     DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));

    // Shift the Value to store into place. rlow contains the parts that go to
    // the lower memory chunk, rhi has the parts that go to the upper one.
    SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
    rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
    SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
                              offset_compl);

    // Merge the old data and the new data and store the results
    // Need to convert vectors here to integer as 'OR'ing floats asserts
    rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
                       DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
    rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
                      DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
                      DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
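    // Editorial worked example (not from the original source): an i32 store
    // whose address has low nibble 0xe gives offset == 14, offset_compl == 2.
    // lowmask then covers bytes 14..15 of the low quadword and himask bytes
    // 0..1 of the high one; rlow/rhi carry the matching halves of the value,
    // so the two read-modify-write stores below leave every other byte of
    // both quadwords unchanged.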
    low = DAG.getStore(the_chain, dl, rlow, basePtr,
                       lowMemPtr,
                       SN->isVolatile(), SN->isNonTemporal(), 16);
    hi = DAG.getStore(the_chain, dl, rhi,
                      DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
                                  DAG.getConstant( 16, PtrVT)),
                      highMemPtr,
                      SN->isVolatile(), SN->isNonTemporal(), 16);
    result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
                         hi.getValue(0));
  }

  return result;
}
//! Generate the address of a constant pool entry.
static SDValue
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      // Just return the SDValue with the constant pool address in it.
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerConstantPool: Relocation model other than static"
                   " not supported.");
}
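// Editorial note (an assumption, not from the original source): in the small
// memory model the A-form node typically selects to a single
//   ila   $rN, <constant pool entry>
// instruction, while the Hi/Lo pair used in large-memory mode corresponds to
// an
//   ilhu  $rN, <entry>@h
//   iohl  $rN, <entry>@l
// sequence that can materialize a full 32-bit address.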
//! Alternate entry point for generating the address of a constant pool entry
SDValue
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG,
                       const SPUTargetMachine &TM) {
  return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
}
static SDValue
LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerJumpTable: Relocation model other than static"
                   " not supported.");
}
static SDValue
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                          PtrVT, GSDN->getOffset());
  const TargetMachine &TM = DAG.getTarget();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  } else {
    report_fatal_error("LowerGlobalAddress: Relocation model other than static"
                       " not supported.");
    /*NOTREACHED*/
  }
}
//! Custom lower double precision floating point constants
static SDValue
LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (VT == MVT::f64) {
    ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());

    assert((FP != 0) &&
           "LowerConstantFP: Node is not ConstantFPSDNode");

    uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
    SDValue T = DAG.getConstant(dbits, MVT::i64);
    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                       DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
  }

  return SDValue();
}
SDValue
SPUTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();

  unsigned ArgOffset = SPUFrameLowering::minStackSize();
  unsigned ArgRegIdx = 0;
  unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  // FIXME: allow for other calling conventions
  CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);

  // Add DAG nodes to load the arguments or copy them out of registers.
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    SDValue ArgVal;
    CCValAssign &VA = ArgLocs[ArgNo];

    if (VA.isRegLoc()) {
      const TargetRegisterClass *ArgRegClass;

      switch (ObjectVT.getSimpleVT().SimpleTy) {
      default:
        report_fatal_error("LowerFormalArguments Unhandled argument type: " +
                           Twine(ObjectVT.getEVTString()));
      case MVT::i8:
        ArgRegClass = &SPU::R8CRegClass;
        break;
      case MVT::i16:
        ArgRegClass = &SPU::R16CRegClass;
        break;
      case MVT::i32:
        ArgRegClass = &SPU::R32CRegClass;
        break;
      case MVT::i64:
        ArgRegClass = &SPU::R64CRegClass;
        break;
      case MVT::i128:
        ArgRegClass = &SPU::GPRCRegClass;
        break;
      case MVT::f32:
        ArgRegClass = &SPU::R32FPRegClass;
        break;
      case MVT::f64:
        ArgRegClass = &SPU::R64FPRegClass;
        break;
      case MVT::v2f64:
      case MVT::v4f32:
      case MVT::v2i64:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        ArgRegClass = &SPU::VECREGRegClass;
        break;
      }

      unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
      RegInfo.addLiveIn(VA.getLocReg(), VReg);
      ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
    } else {
      // We need to load the argument to a virtual register if we determined
      // above that we ran out of physical registers of the appropriate type
      // or we're forced to do vararg
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
      ArgOffset += StackSlotSize;
    }

    InVals.push_back(ArgVal);
    // Update the chain
    Chain = ArgVal.getOperand(0);
  }

  // vararg handling:
  if (isVarArg) {
    // FIXME: we should be able to query the argument registers from
    //        tablegen generated code.
    static const unsigned ArgRegs[] = {
      SPU::R3,  SPU::R4,  SPU::R5,  SPU::R6,  SPU::R7,  SPU::R8,  SPU::R9,
      SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
      SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
      SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
      SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
      SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
      SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
      SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
      SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
      SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
      SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
    };
    // size of ArgRegs array (R3 through R79)
    unsigned NumArgRegs = 77;

    // We will spill (79-3)+1 registers to the stack
    SmallVector<SDValue, 79-3+1> MemOps;

    // Create the frame slot
    for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
      FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
      SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
      unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::VECREGRegClass);
      SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
      SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
                                   false, false, 0);
      Chain = Store.getOperand(0);
      MemOps.push_back(Store);

      // Increment address by stack slot size for the next stored argument
      ArgOffset += StackSlotSize;
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
  }

  return Chain;
}
/// isLSAAddress - Return the immediate to use if the specified
/// value is representable as a LSA address.
static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      (Addr << 14 >> 14) != Addr)
    return 0;  // Top 14 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
}
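// Editorial example (not from the original source): Op == 0x1fffc passes both
// checks (low two bits clear, value fits in the signed 18-bit LSA range) and
// yields the immediate 0x7fff; 0x1fffe fails the alignment test, and 0x40000
// fails the sign-extension test.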
SDValue
SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool doesNotRet, bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  // CellSPU target does not yet support tail call optimization.
  isTailCall = false;

  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  unsigned NumOps = Outs.size();
  unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  // FIXME: allow for other calling conventions
  CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);

  const unsigned NumArgRegs = ArgLocs.size();

  // Handy pointer type
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.
  unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
  unsigned ArgRegIdx = 0;

  // Keep track of registers passing arguments
  std::vector<std::pair<unsigned, SDValue> > RegsToPass;
  // And the arguments passed on the stack
  SmallVector<SDValue, 8> MemOpChains;
  for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
    SDValue Arg = OutVals[ArgRegIdx];
    CCValAssign &VA = ArgLocs[ArgRegIdx];

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    switch (Arg.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
    case MVT::i128:
    case MVT::f32:
    case MVT::f64:
    case MVT::v2i64:
    case MVT::v2f64:
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
        ArgOffset += StackSlotSize;
      }
      break;
    }
  }
  // Accumulate how many bytes are to be pushed on the stack, including the
  // linkage area, and parameter passing area. According to the SPU ABI,
  // we minimally need space for [LR] and [SP].
  unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();

  // Insert a call sequence start
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
                                                            true));

  if (!MemOpChains.empty()) {
    // Adjust the stack pointer for the stack arguments.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }
  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = SPUISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);

    if (!ST->usingLargeMem()) {
      // Turn calls to targets that are defined (i.e., have bodies) into BRSL
      // style calls. Otherwise, external symbols are BRASL calls. This assumes
      // that declared/defined symbols are in the same compilation unit and can
      // be reached through PC-relative jumps.
      //
      // NOTE:
      // This may be an unsafe assumption for JIT and really large compilation
      // units.
      if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
      } else {
        Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
      }
    } else {
      // "Large memory" mode: Turn all calls into indirect calls with an X-form
      // address.
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
                                                 Callee.getValueType());

    if (!ST->usingLargeMem()) {
      Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
    } else {
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
    }
  } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
    // If this is an absolute destination address that appears to be a legal
    // local store address, use the munged value.
    Callee = SDValue(Dest, 0);
  }
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);

  InFlag = Chain.getValue(1);

  // If the function returns void, just return the chain.
  if (Ins.empty())
    return Chain;

  // Now handle the return value(s)
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext());
  CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);

  // If the call has results, copy the values out of the ret val registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                                     InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);
    InVals.push_back(Val);
  }

  return Chain;
}
SDValue
SPUTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_SPU);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);
    Flag = Chain.getValue(1);
  }

  if (Flag.getNode())
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
}
//===----------------------------------------------------------------------===//
// Vector related lowering:
//===----------------------------------------------------------------------===//
static ConstantSDNode *
getVecImm(SDNode *N) {
  SDValue OpVal(0, 0);

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return 0;
  }

  if (OpVal.getNode() != 0) {
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      return CN;
    }
  }

  return 0;
}
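// Editorial example (not from the original source):
//   (build_vector 5, undef, 5, 5) -> the ConstantSDNode for 5
//   (build_vector 5, 6, 5, 5)     -> 0 (not a uniform splat)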
/// get_vec_u18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
/// constant
SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (Value <= 0x3ffff)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}
/// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant
SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
      return DAG.getTargetConstant(Value, ValueType);
    }
  }

  return SDValue();
}
1583 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
1584 /// and the value fits into a signed 10-bit constant, and if so, return the
1586 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1588 if (ConstantSDNode *CN = getVecImm(N)) {
1589 int64_t Value = CN->getSExtValue();
1590 if (ValueType == MVT::i64) {
1591 uint64_t UValue = CN->getZExtValue();
1592 uint32_t upper = uint32_t(UValue >> 32);
1593 uint32_t lower = uint32_t(UValue);
1594 if (upper != lower)
1595 return SDValue();
1596 Value = Value >> 32;
1597 }
1598 if (isInt<10>(Value))
1599 return DAG.getTargetConstant(Value, ValueType);
1600 }
1602 return SDValue();
1603 }
1605 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
1606 /// and the value fits into a signed 8-bit constant, and if so, return the
1607 /// constant.
1608 ///
1609 /// @note: The incoming vector is v16i8 because that's the only way we can load
1610 /// constant vectors. Thus, we test to see if the upper and lower bytes are the
1611 /// same value.
1612 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1613 EVT ValueType) {
1614 if (ConstantSDNode *CN = getVecImm(N)) {
1615 int Value = (int) CN->getZExtValue();
1616 if (ValueType == MVT::i16
1617 && Value <= 0xffff /* truncated from uint64_t */
1618 && ((short) Value >> 8) == ((short) Value & 0xff))
1619 return DAG.getTargetConstant(Value & 0xff, ValueType);
1620 else if (ValueType == MVT::i8
1621 && (Value & 0xff) == Value)
1622 return DAG.getTargetConstant(Value, ValueType);
1623 }
1625 return SDValue();
1626 }
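// Worked example (illustrative): for ValueType == MVT::i16 and a splat whose
// 16-bit pattern is Value = 0x1212, ((short) Value >> 8) and
// ((short) Value & 0xff) are both 0x12, so the byte-immediate 0x12 is
// returned; for Value = 0x1234 the two bytes differ and SDValue() results.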
1628 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1629 /// and the value fits into a signed 16-bit constant, and if so, return the
1630 /// constant.
1631 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1632 EVT ValueType) {
1633 if (ConstantSDNode *CN = getVecImm(N)) {
1634 uint64_t Value = CN->getZExtValue();
1635 if ((ValueType == MVT::i32
1636 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1637 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1638 return DAG.getTargetConstant(Value >> 16, ValueType);
1639 }
1641 return SDValue();
1642 }
1644 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
1645 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1646 if (ConstantSDNode *CN = getVecImm(N)) {
1647 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1648 }
1650 return SDValue();
1651 }
1653 /// get_v2i64_imm - Catch-all for general 64-bit constant vectors
1654 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1655 if (ConstantSDNode *CN = getVecImm(N)) {
1656 return DAG.getTargetConstant(CN->getZExtValue(), MVT::i64);
1657 }
1659 return SDValue();
1660 }
1662 //! Lower a BUILD_VECTOR instruction creatively:
1663 static SDValue
1664 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1665 EVT VT = Op.getValueType();
1666 EVT EltVT = VT.getVectorElementType();
1667 DebugLoc dl = Op.getDebugLoc();
1668 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1669 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1670 unsigned minSplatBits = EltVT.getSizeInBits();
1672 if (minSplatBits < 16)
1673 minSplatBits = 16;
1675 APInt APSplatBits, APSplatUndef;
1676 unsigned SplatBitSize;
1677 bool HasAnyUndefs;
1679 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1680 HasAnyUndefs, minSplatBits)
1681 || minSplatBits < SplatBitSize)
1682 return SDValue(); // Wasn't a constant vector or splat exceeded min
1684 uint64_t SplatBits = APSplatBits.getZExtValue();
1686 switch (VT.getSimpleVT().SimpleTy) {
1687 default:
1688 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1689 Twine(VT.getEVTString()));
1690 /*NOTREACHED*/
1691 case MVT::v4f32: {
1692 uint32_t Value32 = uint32_t(SplatBits);
1693 assert(SplatBitSize == 32
1694 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1695 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1696 SDValue T = DAG.getConstant(Value32, MVT::i32);
1697 return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
1698 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
1699 break;
1700 }
1701 case MVT::v2f64: {
1702 uint64_t f64val = uint64_t(SplatBits);
1703 assert(SplatBitSize == 64
1704 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1705 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1706 SDValue T = DAG.getConstant(f64val, MVT::i64);
1707 return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
1708 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
1709 break;
1710 }
1711 case MVT::v16i8: {
1712 // 8-bit constants have to be expanded to 16-bits
1713 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1714 SmallVector<SDValue, 8> Ops;
1716 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1717 return DAG.getNode(ISD::BITCAST, dl, VT,
1718 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
1719 }
1720 case MVT::v8i16: {
1721 unsigned short Value16 = SplatBits;
1722 SDValue T = DAG.getConstant(Value16, EltVT);
1723 SmallVector<SDValue, 8> Ops;
1725 Ops.assign(8, T);
1726 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
1727 }
1728 case MVT::v4i32: {
1729 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1730 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
1731 }
1732 case MVT::v2i64: {
1733 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
1734 }
1735 }
1737 return SDValue();
1738 }
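// Worked example (illustrative): a v4i32 build_vector splatting 0x12345678
// becomes (BUILD_VECTOR 0x12345678 x 4) above, which instruction selection
// can match against SPU immediate-load patterns; a v16i8 splat of 0x12 is
// widened to a v8i16 build_vector of the 16-bit pattern 0x1212 and bitcast
// back to v16i8, since there is no byte-granular immediate vector load.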
1741 //! Lower a splatted v2i64 constant to an SPU-friendly constant or shuffle
1742 SDValue
1743 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1744 DebugLoc dl) {
1745 uint32_t upper = uint32_t(SplatVal >> 32);
1746 uint32_t lower = uint32_t(SplatVal);
1748 if (upper == lower) {
1749 // Magic constant that can be matched by IL, ILA, et al.
1750 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1751 return DAG.getNode(ISD::BITCAST, dl, OpVT,
1752 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1753 Val, Val, Val, Val));
1754 }
1755 bool upper_special, lower_special;
1757 // NOTE: This code creates common-case shuffle masks that can be easily
1758 // detected as common expressions. It is not attempting to create highly
1759 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
1761 // Detect if the upper or lower half is a special shuffle mask pattern:
1762 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1763 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
1765 // Both upper and lower are special, lower to a constant pool load:
1766 if (lower_special && upper_special) {
1767 SDValue UpperVal = DAG.getConstant(upper, MVT::i32);
1768 SDValue LowerVal = DAG.getConstant(lower, MVT::i32);
1769 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1770 UpperVal, LowerVal, UpperVal, LowerVal);
1771 return DAG.getNode(ISD::BITCAST, dl, OpVT, BV);
1772 }
1774 SDValue LO32;
1775 SDValue HI32;
1776 SmallVector<SDValue, 16> ShufBytes;
1779 // Create lower vector if not a special pattern
1780 if (!lower_special) {
1781 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1782 LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1783 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1784 LO32C, LO32C, LO32C, LO32C));
1785 }
1787 // Create upper vector if not a special pattern
1788 if (!upper_special) {
1789 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1790 HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1791 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1792 HI32C, HI32C, HI32C, HI32C));
1793 }
1795 // If either upper or lower are special, then the two input operands are
1796 // the same (basically, one of them is a "don't care")
1797 if (lower_special)
1798 LO32 = HI32;
1799 if (upper_special)
1800 HI32 = LO32;
1802 for (int i = 0; i < 4; ++i) {
1803 uint64_t val = 0;
1804 for (int j = 0; j < 4; ++j) {
1806 bool process_upper, process_lower;
1807 val <<= 8;
1808 process_upper = (upper_special && (i & 1) == 0);
1809 process_lower = (lower_special && (i & 1) == 1);
1811 if (process_upper || process_lower) {
1812 if ((process_upper && upper == 0)
1813 || (process_lower && lower == 0))
1814 val |= 0x80;
1815 else if ((process_upper && upper == 0xffffffff)
1816 || (process_lower && lower == 0xffffffff))
1817 val |= 0xc0;
1818 else if ((process_upper && upper == 0x80000000)
1819 || (process_lower && lower == 0x80000000))
1820 val |= (j == 0 ? 0xe0 : 0x80);
1821 } else
1822 val |= i * 4 + j + ((i & 1) * 16);
1823 }
1825 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1826 }
1828 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1829 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1830 &ShufBytes[0], ShufBytes.size()));
1831 }
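// The special selector bytes above follow the SPU shufb semantics: a control
// byte of the form 10xxxxxx yields 0x00, 110xxxxx yields 0xff, and 111xxxxx
// yields 0x80. Worked example (illustrative): SplatVal = 0x00000000ffffffffULL
// has upper == 0 and lower == 0xffffffff, both "special", so the splat is
// emitted directly as the v4i32 constant (0, -1, 0, -1) bitcast to v2i64.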
1834 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1835 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1836 /// permutation vector, V3, is monotonically increasing with one "exception"
1837 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1838 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1839 /// In either case, the net result is going to eventually invoke SHUFB to
1840 /// permute/shuffle the bytes from V1 and V2.
1842 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which
1843 /// generate the control word for byte/halfword/word insertion. This handles a
1844 /// single element move from V2 into V1.
1846 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instruction.
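/// Worked examples (illustrative, for v4i32 shuffles):
///  - mask (0, 1, 2, 7): elements 0-2 come from V1 in increasing order and
///    element 7 names V2's word 3, the same slot it lands in, so this is
///    "monotonic with one exception" and yields SHUFFLE_MASK at byte offset 12.
///  - mask (1, 2, 3, 0): each element follows its predecessor with a single
///    wrap-around, so it lowers to ROTBYTES_LEFT by 1 element (4 bytes).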
1847 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1848 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1849 SDValue V1 = Op.getOperand(0);
1850 SDValue V2 = Op.getOperand(1);
1851 DebugLoc dl = Op.getDebugLoc();
1853 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1855 // If we have a single element being moved from V1 to V2, this can be handled
1856 // using the C*[DX] compute mask instructions, but the vector elements have
1857 // to be monotonically increasing with one exception element, and the source
1858 // slot of the element to move must be the same as the destination.
1859 EVT VecVT = V1.getValueType();
1860 EVT EltVT = VecVT.getVectorElementType();
1861 unsigned EltsFromV2 = 0;
1862 unsigned V2EltOffset = 0;
1863 unsigned V2EltIdx0 = 0;
1864 unsigned CurrElt = 0;
1865 unsigned MaxElts = VecVT.getVectorNumElements();
1866 unsigned PrevElt = 0;
1867 bool monotonic = true;
1868 bool rotate = true;
1869 int rotamt = 0;
1870 EVT maskVT; // which of the c?d instructions to use
1872 if (EltVT == MVT::i8) {
1873 V2EltIdx0 = 16;
1874 maskVT = MVT::v16i8;
1875 } else if (EltVT == MVT::i16) {
1876 V2EltIdx0 = 8;
1877 maskVT = MVT::v8i16;
1878 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1879 V2EltIdx0 = 4;
1880 maskVT = MVT::v4i32;
1881 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1882 V2EltIdx0 = 2;
1883 maskVT = MVT::v2i64;
1884 } else
1885 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1887 for (unsigned i = 0; i != MaxElts; ++i) {
1888 if (SVN->getMaskElt(i) < 0)
1889 continue;
1891 unsigned SrcElt = SVN->getMaskElt(i);
1893 if (monotonic) {
1894 if (SrcElt >= V2EltIdx0) {
1895 // TODO: optimize for the monotonic case when several consecutive
1896 // elements are taken from V2. Do we ever get such a case?
1897 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1898 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1899 else
1900 monotonic = false;
1901 ++EltsFromV2;
1902 } else if (CurrElt != SrcElt) {
1903 monotonic = false;
1904 } else {
1905 ++CurrElt;
1906 }
1907 }
1909 if (rotate) {
1910 if (PrevElt > 0 && SrcElt < MaxElts) {
1911 if ((PrevElt == SrcElt - 1)
1912 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1913 PrevElt = SrcElt;
1914 } else {
1915 rotate = false;
1916 }
1917 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1918 // First time or after a "wrap around"
1919 rotamt = SrcElt-i;
1920 PrevElt = SrcElt;
1921 } else {
1922 // This isn't a rotation, it takes elements from vector 2
1923 rotate = false;
1924 }
1925 }
1926 }
1928 if (EltsFromV2 == 1 && monotonic) {
1929 // Compute mask and shuffle
1930 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1932 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1933 // R1 ($sp) is used here only as it is guaranteed to have last bits zero
1934 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1935 DAG.getRegister(SPU::R1, PtrVT),
1936 DAG.getConstant(V2EltOffset, MVT::i32));
1937 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1938 maskVT, Pointer);
1940 // Use shuffle mask in SHUFB synthetic instruction:
1941 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1942 ShufMaskOp);
1943 } else if (rotate) {
1944 if (rotamt < 0)
1945 rotamt += MaxElts;
1946 rotamt *= EltVT.getSizeInBits()/8;
1947 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1948 V1, DAG.getConstant(rotamt, MVT::i16));
1949 } else {
1950 // Convert the SHUFFLE_VECTOR mask's input element units to the
1951 // actual bytes.
1952 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1954 SmallVector<SDValue, 16> ResultMask;
1955 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1956 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1958 for (unsigned j = 0; j < BytesPerElement; ++j)
1959 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1960 }
1961 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1962 &ResultMask[0], ResultMask.size());
1963 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
1964 }
1965 }
1967 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1968 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1969 DebugLoc dl = Op.getDebugLoc();
1971 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1972 // For a constant, build the appropriate constant vector, which will
1973 // eventually simplify to a vector register load.
1975 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1976 SmallVector<SDValue, 16> ConstVecValues;
1977 EVT VT;
1978 size_t n_copies;
1980 // Create a constant vector:
1981 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1982 default: llvm_unreachable("Unexpected constant value type in "
1983 "LowerSCALAR_TO_VECTOR");
1984 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1985 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1986 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1987 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1988 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1989 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1990 }
1992 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1993 for (size_t j = 0; j < n_copies; ++j)
1994 ConstVecValues.push_back(CValue);
1996 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1997 &ConstVecValues[0], ConstVecValues.size());
1998 } else {
1999 // Otherwise, copy the value from one register to another:
2000 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
2001 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
2002 case MVT::i8:
2003 case MVT::i16:
2004 case MVT::i32:
2005 case MVT::i64:
2006 case MVT::f32:
2007 case MVT::f64:
2008 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
2009 }
2010 }
2011 }
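// Worked example (illustrative): scalar_to_vector of the constant 7 with
// result type v4i32 takes the constant path above and builds
// (BUILD_VECTOR 7, 7, 7, 7); a non-constant operand instead becomes
// PREFSLOT2VEC, which places the scalar into the vector's preferred slot.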
2015 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2016 EVT VT = Op.getValueType();
2017 SDValue N = Op.getOperand(0);
2018 SDValue Elt = Op.getOperand(1);
2019 DebugLoc dl = Op.getDebugLoc();
2020 SDValue retval;
2022 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2023 // Constant argument:
2024 int EltNo = (int) C->getZExtValue();
2026 // sanity checks:
2027 if (VT == MVT::i8 && EltNo >= 16)
2028 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
2029 else if (VT == MVT::i16 && EltNo >= 8)
2030 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
2031 else if (VT == MVT::i32 && EltNo >= 4)
2032 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
2033 else if (VT == MVT::i64 && EltNo >= 2)
2034 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
2036 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
2037 // i32 and i64: Element 0 is the preferred slot
2038 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
2039 }
2041 // Need to generate shuffle mask and extract:
2042 int prefslot_begin = -1, prefslot_end = -1;
2043 int elt_byte = EltNo * VT.getSizeInBits() / 8;
2045 switch (VT.getSimpleVT().SimpleTy) {
2046 default:
2047 assert(false && "Invalid value type!");
2048 case MVT::i8: {
2049 prefslot_begin = prefslot_end = 3;
2050 break;
2051 }
2052 case MVT::i16: {
2053 prefslot_begin = 2; prefslot_end = 3;
2054 break;
2055 }
2056 case MVT::i32:
2057 case MVT::f32: {
2058 prefslot_begin = 0; prefslot_end = 3;
2059 break;
2060 }
2061 case MVT::i64:
2062 case MVT::f64: {
2063 prefslot_begin = 0; prefslot_end = 7;
2064 break;
2065 }
2066 }
2068 assert(prefslot_begin != -1 && prefslot_end != -1 &&
2069 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
2071 unsigned int ShufBytes[16] = {
2072 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2073 };
2074 for (int i = 0; i < 16; ++i) {
2075 // zero fill upper part of preferred slot, don't care about the
2076 // rest (it's done automatically)
2077 unsigned int mask_val;
2078 if (i <= prefslot_end) {
2079 mask_val =
2080 ((i < prefslot_begin)
2081 ? 0x80
2082 : elt_byte + (i - prefslot_begin));
2084 ShufBytes[i] = mask_val;
2085 } else
2086 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
2087 }
2089 SDValue ShufMask[4];
2090 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
2091 unsigned bidx = i * 4;
2092 unsigned int bits = ((ShufBytes[bidx] << 24) |
2093 (ShufBytes[bidx+1] << 16) |
2094 (ShufBytes[bidx+2] << 8) |
2095 ShufBytes[bidx+3]);
2096 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
2097 }
2099 SDValue ShufMaskVec =
2100 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2101 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
2103 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2104 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
2105 N, N, ShufMaskVec));
2106 } else {
2107 // Variable index: Rotate the requested element into slot 0, then replicate
2108 // slot 0 across the vector
2109 EVT VecVT = N.getValueType();
2110 if (!VecVT.isSimple() || !VecVT.isVector()) {
2111 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
2115 // Make life easier by making sure the index is zero-extended to i32
2116 if (Elt.getValueType() != MVT::i32)
2117 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2119 // Scale the index to a bit/byte shift quantity
2120 APInt scaleFactor =
2121 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2122 unsigned scaleShift = scaleFactor.logBase2();
2123 SDValue vecShift;
2125 if (scaleShift > 0) {
2126 // Scale the shift factor:
2127 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2128 DAG.getConstant(scaleShift, MVT::i32));
2129 }
2131 vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
2133 // Replicate the bytes starting at byte 0 across the entire vector (for
2134 // consistency with the notion of a unified register set)
2135 SDValue replicate;
2137 switch (VT.getSimpleVT().SimpleTy) {
2138 default:
2139 report_fatal_error("LowerEXTRACT_VECTOR_ELT(variable): Unhandled vector "
2140 "type");
2141 /*NOTREACHED*/
2142 case MVT::i8: {
2143 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2144 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2145 factor, factor, factor, factor);
2146 break;
2147 }
2148 case MVT::i16: {
2149 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2150 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2151 factor, factor, factor, factor);
2152 break;
2153 }
2154 case MVT::i32:
2155 case MVT::f32: {
2156 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2157 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2158 factor, factor, factor, factor);
2159 break;
2160 }
2161 case MVT::i64:
2162 case MVT::f64: {
2163 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2164 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2165 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2166 loFactor, hiFactor, loFactor, hiFactor);
2167 break;
2168 }
2169 }
2171 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2172 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2173 vecShift, vecShift, replicate));
2174 }
2176 return retval;
2177 }
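// Worked example (illustrative): extracting constant element 3 of a v8i16
// uses prefslot_begin/prefslot_end = 2/3 and elt_byte = 6, producing the
// selector bytes { 0x80, 0x80, 6, 7, ... } (first word 0x80800607): source
// bytes 6-7 move into the i16 preferred slot (bytes 2-3) while the bytes
// above it are zero-filled by the 0x80 selector.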
2179 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2180 SDValue VecOp = Op.getOperand(0);
2181 SDValue ValOp = Op.getOperand(1);
2182 SDValue IdxOp = Op.getOperand(2);
2183 DebugLoc dl = Op.getDebugLoc();
2184 EVT VT = Op.getValueType();
2185 EVT eltVT = ValOp.getValueType();
2187 // use 0 when the lane to insert to is 'undef'
2188 int64_t Offset = 0;
2189 if (IdxOp.getOpcode() != ISD::UNDEF) {
2190 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2191 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2192 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2193 }
2195 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2196 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2197 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2198 DAG.getRegister(SPU::R1, PtrVT),
2199 DAG.getConstant(Offset, PtrVT));
2200 // widen the mask when dealing with half vectors
2201 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2202 128/ VT.getVectorElementType().getSizeInBits());
2203 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
2205 SDValue result =
2206 DAG.getNode(SPUISD::SHUFB, dl, VT,
2207 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2208 VecOp,
2209 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
2211 return result;
2212 }
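// Worked example (illustrative): inserting an i32 into lane 2 computes
// Offset = 2 * 4 = 8, so SHUFFLE_MASK of ($sp + 8) selects as a C*D-class
// instruction whose control word routes the scalar (sitting in the preferred
// slot of the SCALAR_TO_VECTOR operand) into bytes 8-11 of VecOp.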
2214 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2215 const TargetLowering &TLI)
2216 {
2217 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2218 DebugLoc dl = Op.getDebugLoc();
2219 EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
2221 assert(Op.getValueType() == MVT::i8);
2222 switch (Opc) {
2223 default:
2224 llvm_unreachable("Unhandled i8 math operator");
2225 /*NOTREACHED*/
2226 break;
2228 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2229 case ISD::ADD: {
2230 SDValue N1 = Op.getOperand(1);
2231 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2232 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2233 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2234 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2235 }
2239 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2240 case ISD::SUB: {
2241 SDValue N1 = Op.getOperand(1);
2242 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2243 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2244 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2245 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2246 }
2247 case ISD::ROTR:
2248 case ISD::ROTL: {
2249 SDValue N1 = Op.getOperand(1);
2250 EVT N1VT = N1.getValueType();
2252 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2253 if (!N1VT.bitsEq(ShiftVT)) {
2254 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2255 ? ISD::ZERO_EXTEND
2256 : ISD::TRUNCATE;
2257 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2258 }
2260 // Replicate lower 8-bits into upper 8:
2261 SDValue ExpandArg =
2262 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2263 DAG.getNode(ISD::SHL, dl, MVT::i16,
2264 N0, DAG.getConstant(8, MVT::i32)));
2266 // Truncate back down to i8
2267 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2268 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
2269 }
2270 case ISD::SRL:
2271 case ISD::SHL: {
2272 SDValue N1 = Op.getOperand(1);
2273 EVT N1VT = N1.getValueType();
2275 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2276 if (!N1VT.bitsEq(ShiftVT)) {
2277 unsigned N1Opc = ISD::ZERO_EXTEND;
2279 if (N1.getValueType().bitsGT(ShiftVT))
2280 N1Opc = ISD::TRUNCATE;
2282 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2283 }
2285 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2286 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2287 }
2288 case ISD::SRA: {
2289 SDValue N1 = Op.getOperand(1);
2290 EVT N1VT = N1.getValueType();
2292 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2293 if (!N1VT.bitsEq(ShiftVT)) {
2294 unsigned N1Opc = ISD::SIGN_EXTEND;
2296 if (N1VT.bitsGT(ShiftVT))
2297 N1Opc = ISD::TRUNCATE;
2298 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2299 }
2301 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2302 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2303 }
2304 case ISD::MUL: {
2305 SDValue N1 = Op.getOperand(1);
2307 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2308 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2309 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2310 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2311 }
2312 }
2314 return SDValue();
2315 }
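// Worked example (illustrative): an i8 add of 100 + 100 sign-extends both
// operands to i16, producing 200 (0x00c8), then truncates back to i8,
// yielding 0xc8, i.e. -56, which is the correct 8-bit wrap-around result.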
2318 //! Lower byte immediate operations for v16i8 vectors:
2319 static SDValue
2320 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2321 SDValue ConstVec;
2322 SDValue Arg;
2323 EVT VT = Op.getValueType();
2324 DebugLoc dl = Op.getDebugLoc();
2326 ConstVec = Op.getOperand(0);
2327 Arg = Op.getOperand(1);
2328 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2329 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2330 ConstVec = ConstVec.getOperand(0);
2331 } else {
2332 ConstVec = Op.getOperand(1);
2333 Arg = Op.getOperand(0);
2334 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2335 ConstVec = ConstVec.getOperand(0);
2336 }
2337 }
2338 }
2340 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2341 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2342 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2344 APInt APSplatBits, APSplatUndef;
2345 unsigned SplatBitSize;
2346 bool HasAnyUndefs;
2347 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2349 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2350 HasAnyUndefs, minSplatBits)
2351 && minSplatBits <= SplatBitSize) {
2352 uint64_t SplatBits = APSplatBits.getZExtValue();
2353 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2355 SmallVector<SDValue, 16> tcVec;
2356 tcVec.assign(16, tc);
2357 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2358 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2359 }
2360 }
2362 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2363 // lowered. Return the operation, rather than a null SDValue.
2364 return Op;
2365 }
2367 //! Custom lowering for CTPOP (count population)
2368 /*!
2369 Custom lowering code that counts the number of ones in the input
2370 operand. SPU has such an instruction, but it counts the number of
2371 ones per byte, which then have to be accumulated.
2372 */
2373 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2374 EVT VT = Op.getValueType();
2375 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2376 VT, (128 / VT.getSizeInBits()));
2377 DebugLoc dl = Op.getDebugLoc();
2379 switch (VT.getSimpleVT().SimpleTy) {
2380 default:
2381 assert(false && "Invalid value type!");
2382 case MVT::i8: {
2383 SDValue N = Op.getOperand(0);
2384 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2386 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2387 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2389 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2390 }
2392 case MVT::i16: {
2393 MachineFunction &MF = DAG.getMachineFunction();
2394 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2396 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2398 SDValue N = Op.getOperand(0);
2399 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2400 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2401 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2403 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2404 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2406 // CNTB_result becomes the chain to which all of the virtual registers
2407 // CNTB_reg, SUM1_reg become associated:
2408 SDValue CNTB_result =
2409 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2411 SDValue CNTB_rescopy =
2412 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2414 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2416 return DAG.getNode(ISD::AND, dl, MVT::i16,
2417 DAG.getNode(ISD::ADD, dl, MVT::i16,
2418 DAG.getNode(ISD::SRL, dl, MVT::i16,
2419 Tmp1, Shift1),
2420 Tmp1),
2421 Mask0);
2422 }
2424 case MVT::i32: {
2425 MachineFunction &MF = DAG.getMachineFunction();
2426 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2428 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2429 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2431 SDValue N = Op.getOperand(0);
2432 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2433 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2434 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2435 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2437 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2438 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2440 // CNTB_result becomes the chain to which all of the virtual registers
2441 // CNTB_reg, SUM1_reg become associated:
2442 SDValue CNTB_result =
2443 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2445 SDValue CNTB_rescopy =
2446 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2448 SDValue Comp1 =
2449 DAG.getNode(ISD::SRL, dl, MVT::i32,
2450 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2451 Shift1);
2453 SDValue Sum1 =
2454 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2455 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2457 SDValue Sum1_rescopy =
2458 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2460 SDValue Comp2 =
2461 DAG.getNode(ISD::SRL, dl, MVT::i32,
2462 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2463 Shift2);
2464 SDValue Sum2 =
2465 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2466 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2468 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2469 }
2471 case MVT::i64:
2472 break;
2473 }
2475 return SDValue();
2476 }
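// Worked example (illustrative, i16 path above): for the input 0x0303, CNTB
// yields the per-byte counts 0x0202; folding the high byte into the low byte
// via (x >> 8) + x gives 0x0204, and the 0x0f mask leaves 4, the population
// count of 0x0303.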
2478 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2479 /*!
2480 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2481 All conversions to i64 are expanded to a libcall.
2482 */
2483 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2484 const SPUTargetLowering &TLI) {
2485 EVT OpVT = Op.getValueType();
2486 SDValue Op0 = Op.getOperand(0);
2487 EVT Op0VT = Op0.getValueType();
2489 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2490 || OpVT == MVT::i64) {
2491 // Convert f32 / f64 to i32 / i64 via libcall.
2492 RTLIB::Libcall LC =
2493 (Op.getOpcode() == ISD::FP_TO_SINT)
2494 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2495 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2496 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2498 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2504 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2505 /*!
2506 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2507 All conversions from i64 are expanded to a libcall.
2508 */
2509 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2510 const SPUTargetLowering &TLI) {
2511 EVT OpVT = Op.getValueType();
2512 SDValue Op0 = Op.getOperand(0);
2513 EVT Op0VT = Op0.getValueType();
2515 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2516 || Op0VT == MVT::i64) {
2517 // Convert i32, i64 to f64 via libcall:
2518 RTLIB::Libcall LC =
2519 (Op.getOpcode() == ISD::SINT_TO_FP)
2520 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2521 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2522 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected int-to-fp conversion!");
2523 SDValue Dummy;
2524 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2525 }
2527 return Op;
2528 }
2530 //! Lower ISD::SETCC
2531 /*!
2532 This handles MVT::f64 (double floating point) condition lowering
2533 */
2534 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2535 const TargetLowering &TLI) {
2536 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2537 DebugLoc dl = Op.getDebugLoc();
2538 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2540 SDValue lhs = Op.getOperand(0);
2541 SDValue rhs = Op.getOperand(1);
2542 EVT lhsVT = lhs.getValueType();
2543 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::f64\n");
2545 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2546 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2547 EVT IntVT(MVT::i64);
2549 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2550 // selected to a NOP:
2551 SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
2552 SDValue lhsHi32 =
2553 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2554 DAG.getNode(ISD::SRL, dl, IntVT,
2555 i64lhs, DAG.getConstant(32, MVT::i32)));
2556 SDValue lhsHi32abs =
2557 DAG.getNode(ISD::AND, dl, MVT::i32,
2558 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2559 SDValue lhsLo32 =
2560 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2562 // SETO and SETUO only use the lhs operand:
2563 if (CC->get() == ISD::SETO) {
2564 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2565 // SETUO
2566 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2567 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2568 DAG.getSetCC(dl, ccResultVT,
2569 lhs, DAG.getConstantFP(0.0, lhsVT),
2570 ISD::SETUO),
2571 DAG.getConstant(ccResultAllOnes, ccResultVT));
2572 } else if (CC->get() == ISD::SETUO) {
2573 // Evaluates to true if Op0 is [SQ]NaN
2574 return DAG.getNode(ISD::AND, dl, ccResultVT,
2575 DAG.getSetCC(dl, ccResultVT,
2576 lhsHi32abs,
2577 DAG.getConstant(0x7ff00000, MVT::i32),
2578 ISD::SETGE),
2579 DAG.getSetCC(dl, ccResultVT,
2580 lhsLo32,
2581 DAG.getConstant(0, MVT::i32),
2582 ISD::SETGT));
2583 }
2585 SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
2586 SDValue rhsHi32 =
2587 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2588 DAG.getNode(ISD::SRL, dl, IntVT,
2589 i64rhs, DAG.getConstant(32, MVT::i32)));
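// Note (illustrative): IEEE-754 doubles compare like sign-magnitude integers.
// The select/subtract sequence below maps negative values x (sign bit set)
// to 0x8000000000000000 - x, turning sign-magnitude order into ordinary
// two's-complement order so that a plain integer setcc gives the f64 answer.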
2591 // If a value is negative, subtract from the sign magnitude constant:
2592 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2594 // Convert the sign-magnitude representation into 2's complement:
2595 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2596 lhsHi32, DAG.getConstant(31, MVT::i32));
2597 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2598 SDValue lhsSelect =
2599 DAG.getNode(ISD::SELECT, dl, IntVT,
2600 lhsSelectMask, lhsSignMag2TC, i64lhs);
2602 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2603 rhsHi32, DAG.getConstant(31, MVT::i32));
2604 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2605 SDValue rhsSelect =
2606 DAG.getNode(ISD::SELECT, dl, IntVT,
2607 rhsSelectMask, rhsSignMag2TC, i64rhs);
2609 unsigned compareOp = 0;
2611 switch (CC->get()) {
2612 case ISD::SETOEQ:
2613 case ISD::SETUEQ:
2614 compareOp = ISD::SETEQ; break;
2615 case ISD::SETOGT:
2616 case ISD::SETUGT:
2617 compareOp = ISD::SETGT; break;
2618 case ISD::SETOGE:
2619 case ISD::SETUGE:
2620 compareOp = ISD::SETGE; break;
2621 case ISD::SETOLT:
2622 case ISD::SETULT:
2623 compareOp = ISD::SETLT; break;
2624 case ISD::SETOLE:
2625 case ISD::SETULE:
2626 compareOp = ISD::SETLE; break;
2627 case ISD::SETUNE:
2628 case ISD::SETONE:
2629 compareOp = ISD::SETNE; break;
2630 default:
2631 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2632 }
2634 SDValue result =
2635 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2636 (ISD::CondCode) compareOp);
2638 if ((CC->get() & 0x8) == 0) {
2639 // Ordered comparison:
2640 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2641 lhs, DAG.getConstantFP(0.0, MVT::f64),
2642 ISD::SETO);
2643 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2644 rhs, DAG.getConstantFP(0.0, MVT::f64),
2645 ISD::SETO);
2646 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2648 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2649 }
2651 return result;
2652 }
2654 //! Lower ISD::SELECT_CC
2655 /*!
2656 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2657 SELB instruction.
2659 \note Need to revisit this in the future: if the code path through the true
2660 and false value computations is longer than the latency of a branch (6
2661 cycles), then it would be more advantageous to branch and insert a new basic
2662 block and branch on the condition. However, this code does not make that
2663 assumption, given the simplistic uses so far.
2664 */
2666 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2667 const TargetLowering &TLI) {
2668 EVT VT = Op.getValueType();
2669 SDValue lhs = Op.getOperand(0);
2670 SDValue rhs = Op.getOperand(1);
2671 SDValue trueval = Op.getOperand(2);
2672 SDValue falseval = Op.getOperand(3);
2673 SDValue condition = Op.getOperand(4);
2674 DebugLoc dl = Op.getDebugLoc();
2676 // NOTE: SELB's arguments: $rA, $rB, $mask
2678 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2679 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2680 // condition was true and 0s where the condition was false. Hence, the
2681 // arguments to SELB get reversed.
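// Worked example (illustrative): selb with $rA = 0x00ff, $rB = 0xab12 and
// mask = 0xff00 takes the high byte from $rB and the low byte from $rA,
// producing 0xabff; since the setcc result is all-ones where the condition
// holds, passing (falseval, trueval, compare) routes trueval through $rB.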
2683 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2684 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2685 // with another "cannot select select_cc" assert:
2687 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2688 TLI.getSetCCResultType(Op.getValueType()),
2689 lhs, rhs, condition);
2690 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2691 }
2693 //! Custom lower ISD::TRUNCATE
2694 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2695 {
2696 // Type to truncate to
2697 EVT VT = Op.getValueType();
2698 MVT simpleVT = VT.getSimpleVT();
2699 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2700 VT, (128 / VT.getSizeInBits()));
2701 DebugLoc dl = Op.getDebugLoc();
2703 // Type to truncate from
2704 SDValue Op0 = Op.getOperand(0);
2705 EVT Op0VT = Op0.getValueType();
2707 if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
2708 // Create shuffle mask, least significant doubleword of quadword
2709 unsigned maskHigh = 0x08090a0b;
2710 unsigned maskLow = 0x0c0d0e0f;
2711 // Use a shuffle to perform the truncation
2712 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2713 DAG.getConstant(maskHigh, MVT::i32),
2714 DAG.getConstant(maskLow, MVT::i32),
2715 DAG.getConstant(maskHigh, MVT::i32),
2716 DAG.getConstant(maskLow, MVT::i32));
2718 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2719 Op0, Op0, shufMask);
2721 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2722 }
2724 return SDValue(); // Leave the truncate unmolested
2725 }
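// Worked example (illustrative): the i128 -> i64 selector above,
// (0x08090a0b, 0x0c0d0e0f, ...), copies source bytes 8-15 (the least
// significant doubleword on the big-endian SPU) into bytes 0-7 of the
// result, exactly the i64 preferred slot that VEC2PREFSLOT then reads.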
2727 /*!
2728 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2729 * algorithm is to duplicate the sign bit using rotmai to generate at
2730 * least one byte full of sign bits. Then propagate the "sign-byte" into
2731 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2733 * @param Op The sext operand
2734 * @param DAG The current DAG
2735 * @return The SDValue with the entire instruction sequence
2736 */
2737 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2738 {
2739 DebugLoc dl = Op.getDebugLoc();
2741 // Type to extend to
2742 MVT OpVT = Op.getValueType().getSimpleVT();
2744 // Type to extend from
2745 SDValue Op0 = Op.getOperand(0);
2746 MVT Op0VT = Op0.getValueType().getSimpleVT();
2748 // extend i8 & i16 via i32
2749 if (Op0VT == MVT::i8 || Op0VT == MVT::i16) {
2750 Op0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Op0);
2751 Op0VT = MVT::i32;
2752 }
2754 // The type to extend to needs to be a i128 and
2755 // the type to extend from needs to be i64 or i32.
2756 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2757 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2760 // Create shuffle mask
2761 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2762 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2763 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2764 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2765 DAG.getConstant(mask1, MVT::i32),
2766 DAG.getConstant(mask1, MVT::i32),
2767 DAG.getConstant(mask2, MVT::i32),
2768 DAG.getConstant(mask3, MVT::i32));
2770 // Word wise arithmetic right shift to generate at least one byte
2771 // that contains sign bits.
2772 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2773 SDValue sraVal = DAG.getNode(ISD::SRA,
2774 dl,
2775 mvt,
2776 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2777 DAG.getConstant(31, MVT::i32));
2779 // reinterpret as a i128 (SHUFB requires it). This gets lowered away.
2780 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2781 dl, Op0VT, Op0,
2782 DAG.getTargetConstant(
2783 SPU::GPRCRegClass.getID(),
2784 MVT::i32)), 0);
2785 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2786 // and the input value into the lower 64 bits.
2787 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2788 extended, sraVal, shufMask);
2789 return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
2790 }
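// Worked example (illustrative, i64 -> i128): after the arithmetic shift by
// 31, byte 0 of sraVal holds eight copies of the sign bit; the selector
// (0x10101010, 0x10101010, 0x00010203, 0x04050607) then assembles the result
// from eight copies of that sign byte (selectors 0x10-0x1f index the second
// SHUFB operand) followed by bytes 0-7 of the original value.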
2792 //! Custom (target-specific) lowering entry point
2793 /*!
2794 This is where LLVM's DAG selection process calls to do target-specific
2795 lowering of nodes.
2796 */
2797 SDValue
2798 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2799 {
2800 unsigned Opc = (unsigned) Op.getOpcode();
2801 EVT VT = Op.getValueType();
2803 switch (Opc) {
2804 default: {
2805 #ifndef NDEBUG
2806 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2807 errs() << "Op.getOpcode() = " << Opc << "\n";
2808 errs() << "*Op.getNode():\n";
2809 Op.getNode()->dump();
2810 #endif
2811 llvm_unreachable(0);
2812 }
2813 case ISD::LOAD:
2814 case ISD::EXTLOAD:
2815 case ISD::SEXTLOAD:
2816 case ISD::ZEXTLOAD:
2817 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2818 case ISD::STORE:
2819 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2820 case ISD::ConstantPool:
2821 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2822 case ISD::GlobalAddress:
2823 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2824 case ISD::JumpTable:
2825 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2826 case ISD::ConstantFP:
2827 return LowerConstantFP(Op, DAG);
2829 // i8, i64 math ops:
2830 case ISD::ADD:
2831 case ISD::SUB:
2832 case ISD::ROTR:
2833 case ISD::ROTL:
2834 case ISD::SRL:
2835 case ISD::SHL:
2836 case ISD::SRA: {
2837 if (VT == MVT::i8)
2838 return LowerI8Math(Op, DAG, Opc, *this);
2839 break;
2840 }
2842 case ISD::FP_TO_SINT:
2843 case ISD::FP_TO_UINT:
2844 return LowerFP_TO_INT(Op, DAG, *this);
2846 case ISD::SINT_TO_FP:
2847 case ISD::UINT_TO_FP:
2848 return LowerINT_TO_FP(Op, DAG, *this);
2850 // Vector-related lowering.
2851 case ISD::BUILD_VECTOR:
2852 return LowerBUILD_VECTOR(Op, DAG);
2853 case ISD::SCALAR_TO_VECTOR:
2854 return LowerSCALAR_TO_VECTOR(Op, DAG);
2855 case ISD::VECTOR_SHUFFLE:
2856 return LowerVECTOR_SHUFFLE(Op, DAG);
2857 case ISD::EXTRACT_VECTOR_ELT:
2858 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2859 case ISD::INSERT_VECTOR_ELT:
2860 return LowerINSERT_VECTOR_ELT(Op, DAG);
2862 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2863 case ISD::AND:
2864 case ISD::OR:
2865 case ISD::XOR:
2866 return LowerByteImmed(Op, DAG);
2868 // Vector and i8 multiply:
2869 case ISD::MUL:
2870 if (VT == MVT::i8)
2871 return LowerI8Math(Op, DAG, Opc, *this);
2873 case ISD::CTPOP:
2874 return LowerCTPOP(Op, DAG);
2876 case ISD::SELECT_CC:
2877 return LowerSELECT_CC(Op, DAG, *this);
2879 case ISD::SETCC:
2880 return LowerSETCC(Op, DAG, *this);
2882 case ISD::TRUNCATE:
2883 return LowerTRUNCATE(Op, DAG);
2885 case ISD::SIGN_EXTEND:
2886 return LowerSIGN_EXTEND(Op, DAG);
2887 }
2889 return SDValue();
2890 }
2892 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2893 SmallVectorImpl<SDValue>&Results,
2894 SelectionDAG &DAG) const
2895 {
2896 #if 0
2897 unsigned Opc = (unsigned) N->getOpcode();
2898 EVT OpVT = N->getValueType(0);
2900 switch (Opc) {
2901 default: {
2902 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2903 errs() << "Op.getOpcode() = " << Opc << "\n";
2904 errs() << "*Op.getNode():\n";
2912 /* Otherwise, return unchanged */
2915 //===----------------------------------------------------------------------===//
2916 // Target Optimization Hooks
2917 //===----------------------------------------------------------------------===//
2919 SDValue
2920 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2921 {
2922 #if 0
2923 TargetMachine &TM = getTargetMachine();
2924 #endif
2925 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2926 SelectionDAG &DAG = DCI.DAG;
2927 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2928 EVT NodeVT = N->getValueType(0); // The node's value type
2929 EVT Op0VT = Op0.getValueType(); // The first operand's result
2930 SDValue Result; // Initially, empty result
2931 DebugLoc dl = N->getDebugLoc();
2933 switch (N->getOpcode()) {
2934 default: break;
2935 case ISD::ADD: {
2936 SDValue Op1 = N->getOperand(1);
2938 if (Op0.getOpcode() == SPUISD::IndirectAddr
2939 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2940 // Normalize the operands to reduce repeated code
2941 SDValue IndirectArg = Op0, AddArg = Op1;
2943 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2944 IndirectArg = Op1;
2945 AddArg = Op0;
2946 }
2948 if (isa<ConstantSDNode>(AddArg)) {
2949 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2950 SDValue IndOp1 = IndirectArg.getOperand(1);
2952 if (CN0->isNullValue()) {
2953 // (add (SPUindirect <arg>, <arg>), 0) ->
2954 // (SPUindirect <arg>, <arg>)
2956 #if !defined(NDEBUG)
2957 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2958 errs() << "\n"
2959 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2960 << "With:    (SPUindirect <arg>, <arg>)\n";
2961 }
2962 #endif
2964 return IndirectArg;
2965 } else if (isa<ConstantSDNode>(IndOp1)) {
2966 // (add (SPUindirect <arg>, <const>), <const>) ->
2967 // (SPUindirect <arg>, <const + const>)
2968 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2969 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2970 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2972 #if !defined(NDEBUG)
2973 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2974 errs() << "\n"
2975 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2976 << "), " << CN0->getSExtValue() << ")\n"
2977 << "With:    (SPUindirect <arg>, "
2978 << combinedConst << ")\n";
2979 }
2980 #endif
2982 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2983 IndirectArg, combinedValue);
2984 }
2985 }
2986 }
2987 break;
2988 }
2989 case ISD::SIGN_EXTEND:
2990 case ISD::ZERO_EXTEND:
2991 case ISD::ANY_EXTEND: {
2992 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2993 // (any_extend (SPUextract_elt0 <arg>)) ->
2994 // (SPUextract_elt0 <arg>)
2995 // Types must match, however...
2996 #if !defined(NDEBUG)
2997 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2998 errs() << "\nReplace: ";
3000 errs() << "\nWith: ";
3001 Op0.getNode()->dump(&DAG);
3010 case SPUISD::IndirectAddr: {
3011 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
3012 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
3013 if (CN != 0 && CN->isNullValue()) {
3014 // (SPUindirect (SPUaform <addr>, 0), 0) ->
3015 // (SPUaform <addr>, 0)
3017 DEBUG(errs() << "Replace: ");
3018 DEBUG(N->dump(&DAG));
3019 DEBUG(errs() << "\nWith: ");
3020 DEBUG(Op0.getNode()->dump(&DAG));
3021 DEBUG(errs() << "\n");
3025 } else if (Op0.getOpcode() == ISD::ADD) {
3026 SDValue Op1 = N->getOperand(1);
3027 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
3028 // (SPUindirect (add <arg>, <arg>), 0) ->
3029 // (SPUindirect <arg>, <arg>)
3030 if (CN1->isNullValue()) {
3032 #if !defined(NDEBUG)
3033 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
3034 errs() << "\n"
3035 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
3036 << "With:    (SPUindirect <arg>, <arg>)\n";
3037 }
3038 #endif
3040 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
3041 Op0.getOperand(0), Op0.getOperand(1));
3042 }
3043 }
3044 break;
3045 }
3047 case SPUISD::SHL_BITS:
3048 case SPUISD::SHL_BYTES:
3049 case SPUISD::ROTBYTES_LEFT: {
3050 SDValue Op1 = N->getOperand(1);
3052 // Kill degenerate vector shifts:
3053 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3054 if (CN->isNullValue()) {
3055 Result = Op0;
3056 }
3057 }
3058 break;
3059 }
3060 case SPUISD::PREFSLOT2VEC: {
3061 switch (Op0.getOpcode()) {
3062 default:
3063 break;
3064 case ISD::ANY_EXTEND:
3065 case ISD::ZERO_EXTEND:
3066 case ISD::SIGN_EXTEND: {
3067 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
3068 // <arg>
3069 // but only if the SPUprefslot2vec and <arg> types match.
3070 SDValue Op00 = Op0.getOperand(0);
3071 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
3072 SDValue Op000 = Op00.getOperand(0);
3073 if (Op000.getValueType() == NodeVT) {
3074 Result = Op000;
3075 }
3076 }
3077 break;
3078 }
3079 case SPUISD::VEC2PREFSLOT: {
3080 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
3081 // <arg>
3082 Result = Op0.getOperand(0);
3083 break;
3084 }
3085 }
3086 break;
3087 }
3088 }
3090 // Otherwise, return unchanged.
3092 if (Result.getNode()) {
3093 DEBUG(errs() << "\nReplace.SPU: ");
3094 DEBUG(N->dump(&DAG));
3095 DEBUG(errs() << "\nWith: ");
3096 DEBUG(Result.getNode()->dump(&DAG));
3097 DEBUG(errs() << "\n");
3104 //===----------------------------------------------------------------------===//
3105 // Inline Assembly Support
3106 //===----------------------------------------------------------------------===//
3108 /// getConstraintType - Given a constraint letter, return the type of
3109 /// constraint it is for this target.
3110 SPUTargetLowering::ConstraintType
3111 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
3112 if (ConstraintLetter.size() == 1) {
3113 switch (ConstraintLetter[0]) {
3114 default: break;
3115 case 'b':
3116 case 'r':
3117 case 'f':
3118 case 'v':
3119 case 'y':
3120 return C_RegisterClass;
3121 }
3122 }
3123 return TargetLowering::getConstraintType(ConstraintLetter);
3124 }
3126 /// Examine constraint type and operand type and determine a weight value.
3127 /// This object must already have been set up with the operand type
3128 /// and the current alternative constraint selected.
3129 TargetLowering::ConstraintWeight
3130 SPUTargetLowering::getSingleConstraintMatchWeight(
3131 AsmOperandInfo &info, const char *constraint) const {
3132 ConstraintWeight weight = CW_Invalid;
3133 Value *CallOperandVal = info.CallOperandVal;
3134 // If we don't have a value, we can't do a match,
3135 // but allow it at the lowest weight.
3136 if (CallOperandVal == NULL)
3137 return CW_Default;
3138 // Look at the constraint type.
3139 switch (*constraint) {
3140 default:
3141 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3142 break;
3143 //FIXME: Seems like the supported constraint letters were just copied
3144 // from PPC, as the following doesn't correspond to the GCC docs.
3145 // I'm leaving it so until someone adds the corresponding lowering support.
3146 case 'b':
3147 case 'r':
3148 case 'f':
3149 case 'd':
3150 case 'v':
3151 case 'y':
3152 weight = CW_Register;
3153 break;
3154 }
3155 return weight;
3156 }
3158 std::pair<unsigned, const TargetRegisterClass*>
3159 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3160 EVT VT) const
3161 {
3162 if (Constraint.size() == 1) {
3163 // GCC RS6000 Constraint Letters
3164 switch (Constraint[0]) {
3165 case 'b': // R1-R31
3166 case 'r': // R0-R31
3167 if (VT == MVT::i64)
3168 return std::make_pair(0U, SPU::R64CRegisterClass);
3169 return std::make_pair(0U, SPU::R32CRegisterClass);
3170 case 'f':
3171 if (VT == MVT::f32)
3172 return std::make_pair(0U, SPU::R32FPRegisterClass);
3173 else if (VT == MVT::f64)
3174 return std::make_pair(0U, SPU::R64FPRegisterClass);
3175 break;
3176 case 'v':
3177 return std::make_pair(0U, SPU::GPRCRegisterClass);
3178 }
3179 }
3181 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3182 }
3184 //! Compute used/known bits for a SPU operand
3185 void
3186 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3187 const APInt &Mask,
3188 APInt &KnownZero,
3189 APInt &KnownOne,
3190 const SelectionDAG &DAG,
3191 unsigned Depth) const {
3192 #if 0
3193 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3195 switch (Op.getOpcode()) {
3196 default:
3197 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3198 break;
3199 case SPUISD::CALL:
3200 case SPUISD::SHUFB:
3201 case SPUISD::SHUFFLE_MASK:
3202 case SPUISD::CNTB:
3203 case SPUISD::PREFSLOT2VEC:
3204 case SPUISD::LDRESULT:
3205 case SPUISD::VEC2PREFSLOT:
3206 case SPUISD::SHLQUAD_L_BITS:
3207 case SPUISD::SHLQUAD_L_BYTES:
3208 case SPUISD::VEC_ROTL:
3209 case SPUISD::VEC_ROTR:
3210 case SPUISD::ROTBYTES_LEFT:
3211 case SPUISD::SELECT_MASK:
3212 case SPUISD::SELB:
3213 }
3214 #endif
3215 }
3217 unsigned
3218 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3219 unsigned Depth) const {
3220 switch (Op.getOpcode()) {
3221 default:
3222 return 1;
3224 case ISD::SETCC: {
3225 EVT VT = Op.getValueType();
3227 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3228 VT = MVT::i32;
3229 }
3230 return VT.getSizeInBits();
3231 }
3232 }
3233 }
3235 // LowerAsmOperandForConstraint
3236 void
3237 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3238 std::string &Constraint,
3239 std::vector<SDValue> &Ops,
3240 SelectionDAG &DAG) const {
3241 // Default, for the time being, to the base class handler
3242 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3243 }
3245 /// isLegalAddressImmediate - Return true if the integer value can be used
3246 /// as the offset of the target addressing mode.
3247 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3248 Type *Ty) const {
3249 // SPU's addresses are 256K:
3250 return (V > -(1 << 18) && V < (1 << 18) - 1);
3251 }
3253 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3254 return false;
3255 }
3257 bool
3258 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3259 // The SPU target isn't yet aware of offsets.
3260 return false;
3261 }
3263 // can we compare to Imm without writing it into a register?
3264 bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3265 //ceqi, cgti, etc. all take s10 operand
3266 return isInt<10>(Imm);
3267 }
3269 bool
3270 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3271 Type *Ty) const {
3273 // A-form: 18bit absolute address.
3274 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3275 return true;
3277 // D-form: reg + 14bit offset
3278 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3279 return true;
3281 // X-form: reg+reg
3282 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs == 0)
3283 return true;
3285 return false;
3286 }
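// Worked examples (illustrative): a bare global ("@var") with no base
// register is A-form; reg + 48 is D-form, since 48 fits the signed 14-bit
// range [-8192, 8191]; reg + reg is X-form. A global combined with a base
// register, or an offset outside that range, is rejected.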