//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for R600
//
//===----------------------------------------------------------------------===//
#include "R600ISelLowering.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Function.h"

using namespace llvm;

R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
    AMDGPUTargetLowering(TM) {
  addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
  addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
  addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
  computeRegisterProperties();

  setOperationAction(ISD::FADD, MVT::v4f32, Expand);
  setOperationAction(ISD::FMUL, MVT::v4f32, Expand);
  setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
  setOperationAction(ISD::FSUB, MVT::v4f32, Expand);

  setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i32, Expand);

  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);

  setOperationAction(ISD::FSUB, MVT::f32, Expand);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);

  setOperationAction(ISD::VSELECT, MVT::v4i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2i32, Expand);

  // Legalize loads and stores to the private address space.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v2i32, Expand);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom);
  setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Custom);
  setOperationAction(ISD::STORE, MVT::i8, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::v2i32, Expand);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_TO_SINT);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::SELECT_CC);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
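  // With ZeroOrNegativeOneBooleanContent a lowered compare yields an all-ones
  // mask for true, e.g. (i32 setcc eq a, b) produces -1 when a == b and 0
  // otherwise, matching what the SET* ALU instructions return natively.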
  setSchedulingPreference(Sched::VLIW);
}

MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
    MachineInstr * MI, MachineBasicBlock * BB) const {
  MachineFunction * MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock::iterator I = *MI;
  const R600InstrInfo *TII =
      static_cast<const R600InstrInfo*>(MF->getTarget().getInstrInfo());

  switch (MI->getOpcode()) {
  default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case AMDGPU::CLAMP_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                       AMDGPU::MOV,
                                                       MI->getOperand(0).getReg(),
                                                       MI->getOperand(1).getReg());
    TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
    break;
  }

  case AMDGPU::FABS_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                       AMDGPU::MOV,
                                                       MI->getOperand(0).getReg(),
                                                       MI->getOperand(1).getReg());
    TII->addFlag(NewMI, 0, MO_FLAG_ABS);
    break;
  }

  case AMDGPU::FNEG_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                       AMDGPU::MOV,
                                                       MI->getOperand(0).getReg(),
                                                       MI->getOperand(1).getReg());
    TII->addFlag(NewMI, 0, MO_FLAG_NEG);
    break;
  }

  case AMDGPU::MASK_WRITE: {
    unsigned maskedRegister = MI->getOperand(0).getReg();
    assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
    MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
    TII->addFlag(defInstr, 0, MO_FLAG_MASK);
    break;
  }

  case AMDGPU::MOV_IMM_F32:
    TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
                     MI->getOperand(1).getFPImm()->getValueAPF()
                         .bitcastToAPInt().getZExtValue());
    break;
  case AMDGPU::MOV_IMM_I32:
    TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
                     MI->getOperand(1).getImm());
    break;
  case AMDGPU::CONST_COPY: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
        MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
    TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
        MI->getOperand(1).getImm());
    break;
  }

  case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
  case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
    unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
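    // A store immediately followed by the shader's RETURN is the last
    // instruction of the program, so the end-of-program (EOP) bit is folded
    // into the write itself.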
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addImm(EOP); // Set End of program bit
    break;
  }

  case AMDGPU::TXD: {
    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
    MachineOperand &RID = MI->getOperand(4);
    MachineOperand &SID = MI->getOperand(5);
    unsigned TextureId = MI->getOperand(6).getImm();
    unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
    unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
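    // Coordinate channels default to normalized (CT* = 1); the TextureId
    // cases below override them for the rect, array, and shadow texture
    // types.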
    case 8: // ShadowRect
    case 11: // Shadow1DArray
    case 12: // Shadow2DArray
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
            .addOperand(MI->getOperand(3))
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
            .addOperand(MI->getOperand(2))
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addReg(T0, RegState::Implicit)
            .addReg(T1, RegState::Implicit);
    break;
  }

  case AMDGPU::TXD_SHADOW: {
    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
    MachineOperand &RID = MI->getOperand(4);
    MachineOperand &SID = MI->getOperand(5);
    unsigned TextureId = MI->getOperand(6).getImm();
    unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
    unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
    case 8: // ShadowRect
    case 11: // Shadow1DArray
    case 12: // Shadow2DArray
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
            .addOperand(MI->getOperand(3))
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
            .addOperand(MI->getOperand(2))
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addReg(T0, RegState::Implicit)
            .addReg(T1, RegState::Implicit);
    break;
  }

  case AMDGPU::BRANCH:
      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
              .addOperand(MI->getOperand(0));
      break;

  case AMDGPU::BRANCH_COND_f32: {
    MachineInstr *NewMI =
      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
              AMDGPU::PREDICATE_BIT)
              .addOperand(MI->getOperand(1))
              .addImm(OPCODE_IS_NOT_ZERO)
              .addImm(0); // Flags
    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
            .addOperand(MI->getOperand(0))
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case AMDGPU::BRANCH_COND_i32: {
    MachineInstr *NewMI =
      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
              AMDGPU::PREDICATE_BIT)
              .addOperand(MI->getOperand(1))
              .addImm(OPCODE_IS_NOT_ZERO_INT)
              .addImm(0); // Flags
    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
            .addOperand(MI->getOperand(0))
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case AMDGPU::EG_ExportSwz:
  case AMDGPU::R600_ExportSwz: {
    // Instruction is left unmodified if it's not the last one of its type
    bool isLastInstructionOfItsType = true;
    unsigned InstExportType = MI->getOperand(1).getImm();
    for (MachineBasicBlock::iterator NextExportInst = llvm::next(I),
         EndBlock = BB->end(); NextExportInst != EndBlock;
         NextExportInst = llvm::next(NextExportInst)) {
      if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
          NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
        unsigned CurrentInstExportType = NextExportInst->getOperand(1)
            .getImm();
        if (CurrentInstExportType == InstExportType) {
          isLastInstructionOfItsType = false;
          break;
        }
      }
    }
    bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
    if (!EOP && !isLastInstructionOfItsType)
      break;
    unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz) ? 84 : 40;
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addOperand(MI->getOperand(2))
            .addOperand(MI->getOperand(3))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6))
            .addImm(CfInst)
            .addImm(EOP);
    break;
  }
  case AMDGPU::RETURN: {
    // RETURN instructions must have the live-out registers as implicit uses,
    // otherwise they appear dead.
    R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
    MachineInstrBuilder MIB(*MF, MI);
    for (unsigned i = 0, e = MFI->LiveOuts.size(); i != e; ++i)
      MIB.addReg(MFI->LiveOuts[i], RegState::Implicit);
    return BB;
  }
  }

  MI->eraseFromParent();
  return BB;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_VOID: {
    SDValue Chain = Op.getOperand(0);
    unsigned IntrinsicID =
        cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    switch (IntrinsicID) {
    case AMDGPUIntrinsic::AMDGPU_store_output: {
      MachineFunction &MF = DAG.getMachineFunction();
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
      MFI->LiveOuts.push_back(Reg);
      return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
    }
    case AMDGPUIntrinsic::R600_store_swizzle: {
      const SDValue Args[8] = {
        Chain,
        Op.getOperand(2), // Export Value
        Op.getOperand(3), // ArrayBase
        Op.getOperand(4), // Type
        DAG.getConstant(0, MVT::i32), // SWZ_X
        DAG.getConstant(1, MVT::i32), // SWZ_Y
        DAG.getConstant(2, MVT::i32), // SWZ_Z
        DAG.getConstant(3, MVT::i32)  // SWZ_W
      };
      return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(),
                         Args, 8);
    }

    // default for switch(IntrinsicID)
    default: break;
    }
    // break out of case ISD::INTRINSIC_VOID in switch(Op.getOpcode())
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
        cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    SDLoc DL(Op);
    switch(IntrinsicID) {
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    case AMDGPUIntrinsic::R600_load_input: {
      int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
      MachineFunction &MF = DAG.getMachineFunction();
      MachineRegisterInfo &MRI = MF.getRegInfo();
      MRI.addLiveIn(Reg);
      return DAG.getCopyFromReg(DAG.getEntryNode(),
                                SDLoc(DAG.getEntryNode()), Reg, VT);
    }

    case AMDGPUIntrinsic::R600_interp_input: {
      int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
      MachineSDNode *interp;
      if (ijb < 0) {
        const MachineFunction &MF = DAG.getMachineFunction();
        const R600InstrInfo *TII =
            static_cast<const R600InstrInfo*>(MF.getTarget().getInstrInfo());
        interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
            MVT::v4f32, DAG.getTargetConstant(slot / 4, MVT::i32));
        return DAG.getTargetExtractSubreg(
            TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
            DL, MVT::f32, SDValue(interp, 0));
      }

      MachineFunction &MF = DAG.getMachineFunction();
      MachineRegisterInfo &MRI = MF.getRegInfo();
      unsigned RegisterI = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb);
      unsigned RegisterJ = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1);
      MRI.addLiveIn(RegisterI);
      MRI.addLiveIn(RegisterJ);
      SDValue RegisterINode = DAG.getCopyFromReg(DAG.getEntryNode(),
          SDLoc(DAG.getEntryNode()), RegisterI, MVT::f32);
      SDValue RegisterJNode = DAG.getCopyFromReg(DAG.getEntryNode(),
          SDLoc(DAG.getEntryNode()), RegisterJ, MVT::f32);

      if (slot % 4 < 2)
        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
            MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, MVT::i32),
            RegisterJNode, RegisterINode);
      else
        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
            MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, MVT::i32),
            RegisterJNode, RegisterINode);
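      // INTERP_PAIR_XY/ZW each define two f32 results (X,Y or Z,W), so
      // slot % 2 selects which element of the pair this input is.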
      return SDValue(interp, slot % 2);
    }

    case AMDGPUIntrinsic::R600_tex:
    case AMDGPUIntrinsic::R600_texc:
    case AMDGPUIntrinsic::R600_txl:
    case AMDGPUIntrinsic::R600_txlc:
    case AMDGPUIntrinsic::R600_txb:
    case AMDGPUIntrinsic::R600_txbc:
    case AMDGPUIntrinsic::R600_txf:
    case AMDGPUIntrinsic::R600_txq:
    case AMDGPUIntrinsic::R600_ddx:
    case AMDGPUIntrinsic::R600_ddy: {
      unsigned TextureOp;
      switch (IntrinsicID) {
      case AMDGPUIntrinsic::R600_tex:  TextureOp = 0; break;
      case AMDGPUIntrinsic::R600_texc: TextureOp = 1; break;
      case AMDGPUIntrinsic::R600_txl:  TextureOp = 2; break;
      case AMDGPUIntrinsic::R600_txlc: TextureOp = 3; break;
      case AMDGPUIntrinsic::R600_txb:  TextureOp = 4; break;
      case AMDGPUIntrinsic::R600_txbc: TextureOp = 5; break;
      case AMDGPUIntrinsic::R600_txf:  TextureOp = 6; break;
      case AMDGPUIntrinsic::R600_txq:  TextureOp = 7; break;
      case AMDGPUIntrinsic::R600_ddx:  TextureOp = 8; break;
      case AMDGPUIntrinsic::R600_ddy:  TextureOp = 9; break;
      default:
        llvm_unreachable("Unknown Texture Operation");
      }

      SDValue TexArgs[19] = {
        DAG.getConstant(TextureOp, MVT::i32),
        Op.getOperand(1),
        DAG.getConstant(0, MVT::i32),
        DAG.getConstant(1, MVT::i32),
        DAG.getConstant(2, MVT::i32),
        DAG.getConstant(3, MVT::i32),
        Op.getOperand(2),
        Op.getOperand(3),
        Op.getOperand(4),
        DAG.getConstant(0, MVT::i32),
        DAG.getConstant(1, MVT::i32),
        DAG.getConstant(2, MVT::i32),
        DAG.getConstant(3, MVT::i32),
        Op.getOperand(5),
        Op.getOperand(6),
        Op.getOperand(7),
        Op.getOperand(8),
        Op.getOperand(9),
        Op.getOperand(10)
      };
      return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs, 19);
    }
    case AMDGPUIntrinsic::AMDGPU_dp4: {
      SDValue Args[8] = {
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(0, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(0, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(1, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(1, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(2, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(2, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(3, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(3, MVT::i32))
      };
      return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args, 8);
    }

    case Intrinsic::r600_read_ngroups_x:
      return LowerImplicitParameter(DAG, VT, DL, 0);
    case Intrinsic::r600_read_ngroups_y:
      return LowerImplicitParameter(DAG, VT, DL, 1);
    case Intrinsic::r600_read_ngroups_z:
      return LowerImplicitParameter(DAG, VT, DL, 2);
    case Intrinsic::r600_read_global_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 3);
    case Intrinsic::r600_read_global_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 4);
    case Intrinsic::r600_read_global_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 5);
    case Intrinsic::r600_read_local_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 6);
    case Intrinsic::r600_read_local_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 7);
    case Intrinsic::r600_read_local_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 8);

    case Intrinsic::r600_read_tgid_x:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_X, VT);
    case Intrinsic::r600_read_tgid_y:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_Y, VT);
    case Intrinsic::r600_read_tgid_z:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_Z, VT);
    case Intrinsic::r600_read_tidig_x:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_X, VT);
    case Intrinsic::r600_read_tidig_y:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_Y, VT);
    case Intrinsic::r600_read_tidig_z:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_Z, VT);
    }
    // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
    break;
  }
  } // end switch(Op.getOpcode())
  return SDValue();
}

void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                            SmallVectorImpl<SDValue> &Results,
                                            SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default: return;
  case ISD::FP_TO_UINT: Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
    // function
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    return;
  }
  case ISD::STORE: {
    SDNode *Node = LowerSTORE(SDValue(N, 0), DAG).getNode();
    Results.push_back(SDValue(Node, 0));
    return;
  }
  }
}

SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
  return DAG.getNode(
      ISD::SETCC,
      SDLoc(Op),
      MVT::i1,
      Op, DAG.getConstantFP(0.0f, MVT::f32),
      DAG.getCondCode(ISD::SETNE)
      );
}

SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                   SDLoc DL,
                                                   unsigned DwordOffset) const {
  unsigned ByteOffset = DwordOffset * 4;
  PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                           AMDGPUAS::PARAM_I_ADDRESS);

  // We shouldn't be using an offset wider than 16-bits for implicit parameters.
  assert(isInt<16>(ByteOffset));
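  // The nine implicit parameters fill the first 9 dwords (36 bytes) of the
  // PARAM_I buffer; e.g. r600_read_global_size_y (DwordOffset 4) loads from
  // byte offset 16. Explicit kernel arguments start right after, at byte 36
  // (see LowerFormalArguments below).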

  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                     DAG.getConstant(ByteOffset, MVT::i32), // PTR
                     MachinePointerInfo(ConstantPointerNull::get(PtrType)),
                     false, false, false, 0);
}

SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32);
}

bool R600TargetLowering::isZero(SDValue Op) const {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
    return Cst->isNullValue();
  } else if (ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CstFP->isZero();
  } else {
    return false;
  }
}

SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  SDValue Temp;

  // LHS and RHS are guaranteed to be the same value type
  EVT CompareVT = LHS.getValueType();

  // Check if we can lower this to a native operation.

  // Try to lower to a SET* instruction:
  //
  // SET* can match the following patterns:
  //
  // select_cc f32, f32, -1, 0, cc_any
  // select_cc f32, f32, 1.0f, 0.0f, cc_any
  // select_cc i32, i32, -1, 0, cc_any
  //

  // Move hardware True/False values to the correct operand.
  if (isHWTrueValue(False) && isHWFalseValue(True)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    std::swap(False, True);
    CC = DAG.getCondCode(ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32));
  }

  if (isHWTrueValue(True) && isHWFalseValue(False) &&
      (CompareVT == VT || VT == MVT::i32)) {
    // This can be matched by a SET* instruction.
    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
  }

  // Try to lower to a CND* instruction:
  //
  // CND* can match the following patterns:
  //
  // select_cc f32, 0.0, f32, f32, cc_any
  // select_cc f32, 0.0, i32, i32, cc_any
  // select_cc i32, 0, f32, f32, cc_any
  // select_cc i32, 0, i32, i32, cc_any
  //
  if (isZero(LHS) || isZero(RHS)) {
    SDValue Cond = (isZero(LHS) ? RHS : LHS);
    SDValue Zero = (isZero(LHS) ? LHS : RHS);
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    if (CompareVT != VT) {
      // Bitcast True / False to the correct types.  This will end up being
      // a nop, but it allows us to define only a single pattern in the
      // .TD files for each CND* instruction rather than having to have
      // one pattern for integer True/False and one for fp True/False
      True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
      False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
    }
    if (isZero(LHS)) {
      CCOpcode = ISD::getSetCCSwappedOperands(CCOpcode);
    }

    switch (CCOpcode) {
    case ISD::SETONE:
    case ISD::SETUNE:
    case ISD::SETNE:
    case ISD::SETULE:
    case ISD::SETULT:
    case ISD::SETOLE:
    case ISD::SETOLT:
    case ISD::SETLE:
    case ISD::SETLT:
      CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
      Temp = True;
      True = False;
      False = Temp;
      break;
    default:
      break;
    }
    SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, CompareVT,
        Cond, Zero,
        True, False,
        DAG.getCondCode(CCOpcode));
    return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
  }

  // Possible Min/Max pattern
  SDValue MinMax = LowerMinMax(Op, DAG);
  if (MinMax.getNode()) {
    return MinMax;
  }

  // If we make it this far, it means we have no native instructions to handle
  // this SELECT_CC, so we must lower it.
  SDValue HWTrue, HWFalse;

  if (CompareVT == MVT::f32) {
    HWTrue = DAG.getConstantFP(1.0f, CompareVT);
    HWFalse = DAG.getConstantFP(0.0f, CompareVT);
  } else if (CompareVT == MVT::i32) {
    HWTrue = DAG.getConstant(-1, CompareVT);
    HWFalse = DAG.getConstant(0, CompareVT);
  }
  else {
    assert(!"Unhandled value type in LowerSELECT_CC");
  }

  // Lower this unsupported SELECT_CC into a combination of two supported
  // SELECT_CC operations.
  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, CompareVT, LHS, RHS, HWTrue,
                             HWFalse, CC);

  return DAG.getNode(ISD::SELECT_CC, DL, VT,
      Cond, HWFalse,
      True, False,
      DAG.getCondCode(ISD::SETNE));
}

SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  return DAG.getNode(ISD::SELECT_CC,
      SDLoc(Op),
      Op.getValueType(),
      Op.getOperand(0),
      DAG.getConstant(0, MVT::i32),
      Op.getOperand(1),
      Op.getOperand(2),
      DAG.getCondCode(ISD::SETNE));
}

/// LLVM generates byte-addressed pointers.  For indirect addressing, we need to
/// convert these pointers to a register index.  Each register holds
/// 16 bytes (4 x 32-bit sub-registers), but we need to take into account the
/// \p StackWidth, which tells us how many of the 4 sub-registers will be used
/// for indirect addressing.
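/// For example, with a StackWidth of 2 each indirect slot spans 8 bytes (two
/// sub-registers), so byte address 24 maps to register index 24 >> 3 = 3.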
SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
                                               unsigned StackWidth,
                                               SelectionDAG &DAG) const {
  unsigned SRLPad;
  switch(StackWidth) {
  case 1:
    SRLPad = 2;
    break;
  case 2:
    SRLPad = 3;
    break;
  case 4:
    SRLPad = 4;
    break;
  default: llvm_unreachable("Invalid stack width");
  }

  return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr,
                     DAG.getConstant(SRLPad, MVT::i32));
}

void R600TargetLowering::getStackAddress(unsigned StackWidth,
                                         unsigned ElemIdx,
                                         unsigned &Channel,
                                         unsigned &PtrIncr) const {
  switch (StackWidth) {
  default:
  case 1:
    Channel = 0;
    PtrIncr = (ElemIdx > 0) ? 1 : 0;
    break;
  case 2:
    Channel = ElemIdx % 2;
    PtrIncr = (ElemIdx == 2) ? 1 : 0;
    break;
  case 4:
    Channel = ElemIdx % 4;
    PtrIncr = (ElemIdx > 3) ? 1 : 0;
    break;
  }
}

SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Value = Op.getOperand(1);
  SDValue Ptr = Op.getOperand(2);

  if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
      Ptr->getOpcode() != AMDGPUISD::DWORDADDR) {
    // Convert pointer from byte address to dword address.
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
                      DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
                                  Ptr, DAG.getConstant(2, MVT::i32)));
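    // The shift right by 2 divides the byte address by 4, e.g. the dword at
    // byte offset 16 becomes dword address 4.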

    if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
      assert(!"Truncated and indexed stores not supported yet");
    }

    Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
    return Chain;
  }

  EVT ValueVT = Value.getValueType();

  if (StoreNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // Lowering for indirect addressing

  const MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering*>(
                                         getTargetMachine().getFrameLowering());
  unsigned StackWidth = TFL->getStackWidth(MF);

  Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);

  if (ValueVT.isVector()) {
    unsigned NumElemVT = ValueVT.getVectorNumElements();
    EVT ElemVT = ValueVT.getVectorElementType();
    SDValue Stores[4];

    assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
                                      "vector width in load");

    for (unsigned i = 0; i < NumElemVT; ++i) {
      unsigned Channel, PtrIncr;
      getStackAddress(StackWidth, i, Channel, PtrIncr);
      Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
                        DAG.getConstant(PtrIncr, MVT::i32));
      SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT,
                                 Value, DAG.getConstant(i, MVT::i32));

      Stores[i] = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                              Chain, Elem, Ptr,
                              DAG.getTargetConstant(Channel, MVT::i32));
    }
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores, NumElemVT);
  } else {
    if (ValueVT == MVT::i8) {
      Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
    }
    Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
        DAG.getTargetConstant(0, MVT::i32)); // Channel
  }

  return Chain;
}

// Returns the base slot of the given constant buffer in the constant file:
// 512 + (kc_bank << 12).
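// For example, CONSTANT_BUFFER_2 below maps to 512 + (2 << 12) = 8704.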
static int
ConstantAddressBlock(unsigned AddressSpace) {
  switch (AddressSpace) {
  case AMDGPUAS::CONSTANT_BUFFER_0:
    return 512;
  case AMDGPUAS::CONSTANT_BUFFER_1:
    return 512 + 4096;
  case AMDGPUAS::CONSTANT_BUFFER_2:
    return 512 + 4096 * 2;
  case AMDGPUAS::CONSTANT_BUFFER_3:
    return 512 + 4096 * 3;
  case AMDGPUAS::CONSTANT_BUFFER_4:
    return 512 + 4096 * 4;
  case AMDGPUAS::CONSTANT_BUFFER_5:
    return 512 + 4096 * 5;
  case AMDGPUAS::CONSTANT_BUFFER_6:
    return 512 + 4096 * 6;
  case AMDGPUAS::CONSTANT_BUFFER_7:
    return 512 + 4096 * 7;
  case AMDGPUAS::CONSTANT_BUFFER_8:
    return 512 + 4096 * 8;
  case AMDGPUAS::CONSTANT_BUFFER_9:
    return 512 + 4096 * 9;
  case AMDGPUAS::CONSTANT_BUFFER_10:
    return 512 + 4096 * 10;
  case AMDGPUAS::CONSTANT_BUFFER_11:
    return 512 + 4096 * 11;
  case AMDGPUAS::CONSTANT_BUFFER_12:
    return 512 + 4096 * 12;
  case AMDGPUAS::CONSTANT_BUFFER_13:
    return 512 + 4096 * 13;
  case AMDGPUAS::CONSTANT_BUFFER_14:
    return 512 + 4096 * 14;
  case AMDGPUAS::CONSTANT_BUFFER_15:
    return 512 + 4096 * 15;
  default:
    return -1;
  }
}

SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
{
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Ptr = Op.getOperand(1);
  SDValue LoweredLoad;

  int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
  if (ConstantBlock > -1) {
    SDValue Result;
    if (dyn_cast<ConstantExpr>(LoadNode->getSrcValue()) ||
        dyn_cast<Constant>(LoadNode->getSrcValue()) ||
        dyn_cast<ConstantSDNode>(Ptr)) {
      SDValue Slots[4];
      for (unsigned i = 0; i < 4; i++) {
        // We want Const position encoded with the following formula:
        // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
        // const_index is Ptr computed by llvm using an alignment of 16.
        // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
        // then div by 4 at the ISel step
        SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
            DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32));
        Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
      }
      Result = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Slots, 4);
    } else {
      // A non-constant ptr can't be folded; keep it as a v4f32 load
      Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
          DAG.getConstant(LoadNode->getAddressSpace() -
                          AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
          );
    }

    if (!VT.isVector()) {
      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
          DAG.getConstant(0, MVT::i32));
    }

    SDValue MergedValues[2] = {
      Result,
      Chain
    };
    return DAG.getMergeValues(MergedValues, 2, DL);
  }

  if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // Lowering for indirect addressing
  const MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering*>(
                                         getTargetMachine().getFrameLowering());
  unsigned StackWidth = TFL->getStackWidth(MF);

  Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);

  if (VT.isVector()) {
    unsigned NumElemVT = VT.getVectorNumElements();
    EVT ElemVT = VT.getVectorElementType();
    SDValue Loads[4];

    assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
                                      "vector width in load");

    for (unsigned i = 0; i < NumElemVT; ++i) {
      unsigned Channel, PtrIncr;
      getStackAddress(StackWidth, i, Channel, PtrIncr);
      Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
                        DAG.getConstant(PtrIncr, MVT::i32));
      Loads[i] = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, ElemVT,
                             Chain, Ptr,
                             DAG.getTargetConstant(Channel, MVT::i32),
                             Op.getOperand(2));
    }
    for (unsigned i = NumElemVT; i < 4; ++i) {
      Loads[i] = DAG.getUNDEF(ElemVT);
    }
    EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
    LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads, 4);
  } else {
    LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
                              Chain, Ptr,
                              DAG.getTargetConstant(0, MVT::i32), // Channel
                              Op.getOperand(2));
  }

  SDValue Ops[2];
  Ops[0] = LoweredLoad;
  Ops[1] = Chain;

  return DAG.getMergeValues(Ops, 2, DL);
}

/// XXX Only kernel functions are supported, so we can assume for now that
/// every function is a kernel function, but in the future we should use
/// separate calling conventions for kernel and non-kernel functions.
SDValue R600TargetLowering::LowerFormalArguments(
                                      SDValue Chain,
                                      CallingConv::ID CallConv,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc DL, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  unsigned ParamOffsetBytes = 36;
  Function::const_arg_iterator FuncArg =
      DAG.getMachineFunction().getFunction()->arg_begin();
  for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) {
    EVT VT = Ins[i].VT;
    Type *ArgType = FuncArg->getType();
    unsigned ArgSizeInBits = ArgType->isPointerTy() ?
                             32 : ArgType->getPrimitiveSizeInBits();
    unsigned ArgBytes = ArgSizeInBits >> 3;
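    // ArgSizeInBits >> 3 is the argument's size in whole bytes, e.g. an i16
    // argument occupies 2 bytes of the parameter buffer.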
    EVT ArgVT = VT;
    if (ArgSizeInBits < VT.getSizeInBits()) {
      assert(!ArgType->isFloatTy() &&
             "Extending floating point arguments not supported yet");
      ArgVT = MVT::getIntegerVT(ArgSizeInBits);
    }
    PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                          AMDGPUAS::PARAM_I_ADDRESS);
    SDValue Arg = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getRoot(),
                                 DAG.getConstant(ParamOffsetBytes, MVT::i32),
                                 MachinePointerInfo(UndefValue::get(PtrTy)),
                                 ArgVT, false, false, ArgBytes);
    InVals.push_back(Arg);
    ParamOffsetBytes += ArgBytes;
  }
  return Chain;
}

EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector()) return MVT::i32;
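  // Vector compares yield a mask vector with the same element count, e.g. a
  // setcc on two v4f32 operands produces a v4i32 of all-ones/all-zeros lanes.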
  return VT.changeVectorElementTypeToInteger();
}
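
// CompactSwizzlableVector folds constant 0.0/1.0 elements and duplicated
// elements of a build_vector into swizzle selects. For example (illustrative):
// build_vector(x, 0.0, 1.0, x) becomes build_vector(x, undef, undef, undef)
// with RemapSwizzle = {1 -> SEL_0, 2 -> SEL_1, 3 -> 0}.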
static SDValue
CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
                        DenseMap<unsigned, unsigned> &RemapSwizzle) {
  assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
  assert(RemapSwizzle.empty());
  SDValue NewBldVec[4] = {
      VectorEntry.getOperand(0),
      VectorEntry.getOperand(1),
      VectorEntry.getOperand(2),
      VectorEntry.getOperand(3)
  };

  for (unsigned i = 0; i < 4; i++) {
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
      if (C->isZero()) {
        RemapSwizzle[i] = 4; // SEL_0
        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
      } else if (C->isExactlyValue(1.0)) {
        RemapSwizzle[i] = 5; // SEL_1
        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
      }
    }

    if (NewBldVec[i].getOpcode() == ISD::UNDEF)
      continue;
    for (unsigned j = 0; j < i; j++) {
      if (NewBldVec[i] == NewBldVec[j]) {
        NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
        RemapSwizzle[i] = j;
        break;
      }
    }
  }

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
                     VectorEntry.getValueType(), NewBldVec, 4);
}
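
// ReorganizeVector swaps elements produced by extract_vector_elt back to
// their source lane where possible, recording the permutation in
// RemapSwizzle; e.g. (illustrative) build_vector(extract(v,1), extract(v,0),
// ...) has its first two elements swapped, with swizzles 0 and 1 exchanged.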
static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
                                DenseMap<unsigned, unsigned> &RemapSwizzle) {
  assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
  assert(RemapSwizzle.empty());
  SDValue NewBldVec[4] = {
      VectorEntry.getOperand(0),
      VectorEntry.getOperand(1),
      VectorEntry.getOperand(2),
      VectorEntry.getOperand(3)
  };
  bool isUnmovable[4] = { false, false, false, false };

  for (unsigned i = 0; i < 4; i++) {
    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
      unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
          ->getZExtValue();
      if (!isUnmovable[Idx]) {
        // Swap i and Idx
        std::swap(NewBldVec[Idx], NewBldVec[i]);
        RemapSwizzle[Idx] = i;
        RemapSwizzle[i] = Idx;
      }
      isUnmovable[Idx] = true;
    }
  }

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
                     VectorEntry.getValueType(), NewBldVec, 4);
}

SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
                                            SDValue Swz[4],
                                            SelectionDAG &DAG) const {
  assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
  // Old -> New swizzle values
  DenseMap<unsigned, unsigned> SwizzleRemap;

  BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
  for (unsigned i = 0; i < 4; i++) {
    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
  }

  SwizzleRemap.clear();
  BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
  for (unsigned i = 0; i < 4; i++) {
    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
  }

  return BuildVector;
}

//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
  case ISD::FP_ROUND: {
      SDValue Arg = N->getOperand(0);
      if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
        return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), N->getValueType(0),
                           Arg.getOperand(0));
      }
      break;
    }

  // (i32 fp_to_sint (fneg (select_cc f32, f32, 1.0, 0.0 cc))) ->
  // (i32 select_cc f32, f32, -1, 0 cc)
  //
  // Mesa's GLSL frontend generates the above pattern a lot and we can lower
  // this to one of the SET*_DX10 instructions.
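  // (The DX10 SET* variants return a 0 / -1 integer mask rather than
  // 0.0f / 1.0f, which is exactly the integer select_cc formed here.)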
  case ISD::FP_TO_SINT: {
    SDValue FNeg = N->getOperand(0);
    if (FNeg.getOpcode() != ISD::FNEG) {
      return SDValue();
    }
    SDValue SelectCC = FNeg.getOperand(0);
    if (SelectCC.getOpcode() != ISD::SELECT_CC ||
        SelectCC.getOperand(0).getValueType() != MVT::f32 || // LHS
        SelectCC.getOperand(2).getValueType() != MVT::f32 || // True
        !isHWTrueValue(SelectCC.getOperand(2)) ||
        !isHWFalseValue(SelectCC.getOperand(3))) {
      return SDValue();
    }

    return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
                       SelectCC.getOperand(0), // LHS
                       SelectCC.getOperand(1), // RHS
                       DAG.getConstant(-1, MVT::i32), // True
                       DAG.getConstant(0, MVT::i32),  // False
                       SelectCC.getOperand(4));       // CC
  }

  // Extract_vec (Build_vector) generated by custom lowering
  // also needs to be custom combined
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::BUILD_VECTOR) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return Arg->getOperand(Element);
      }
    }
    if (Arg.getOpcode() == ISD::BITCAST &&
        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getVTList(),
                           Arg->getOperand(0).getOperand(Element));
      }
    }
    break;
  }

  case ISD::SELECT_CC: {
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
    //      selectcc x, y, a, b, inv(cc)
    //
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, setne ->
    //      selectcc x, y, a, b, cc
    SDValue LHS = N->getOperand(0);
    if (LHS.getOpcode() != ISD::SELECT_CC) {
      return SDValue();
    }

    SDValue RHS = N->getOperand(1);
    SDValue True = N->getOperand(2);
    SDValue False = N->getOperand(3);
    ISD::CondCode NCC = cast<CondCodeSDNode>(N->getOperand(4))->get();

    if (LHS.getOperand(2).getNode() != True.getNode() ||
        LHS.getOperand(3).getNode() != False.getNode() ||
        RHS.getNode() != False.getNode()) {
      return SDValue();
    }

    switch (NCC) {
    default: return SDValue();
    case ISD::SETNE: return LHS;
    case ISD::SETEQ: {
      ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
      LHSCC = ISD::getSetCCInverse(LHSCC,
                                   LHS.getOperand(0).getValueType().isInteger());
      return DAG.getSelectCC(SDLoc(N),
                             LHS.getOperand(0),
                             LHS.getOperand(1),
                             LHS.getOperand(2),
                             LHS.getOperand(3),
                             LHSCC);
    }
    }
  }

  case AMDGPUISD::EXPORT: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;

    SDValue NewArgs[8] = {
      N->getOperand(0), // Chain
      SDValue(),
      N->getOperand(2), // ArrayBase
      N->getOperand(3), // Type
      N->getOperand(4), // SWZ_X
      N->getOperand(5), // SWZ_Y
      N->getOperand(6), // SWZ_Z
      N->getOperand(7)  // SWZ_W
    };
    SDLoc DL(N);
    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG);
    return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
  }
  case AMDGPUISD::TEXTURE_FETCH: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;

    SDValue NewArgs[19] = {
      N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3),
      N->getOperand(4), N->getOperand(5), N->getOperand(6), N->getOperand(7),
      N->getOperand(8), N->getOperand(9), N->getOperand(10), N->getOperand(11),
      N->getOperand(12), N->getOperand(13), N->getOperand(14),
      N->getOperand(15), N->getOperand(16), N->getOperand(17),
      N->getOperand(18)
    };
    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG);
    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), N->getVTList(),
                       NewArgs, 19);
  }
  }
  return SDValue();
}