#include "SIISelLowering.h"
#include "AMDGPU.h"
-#include "AMDIL.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
using namespace llvm;
SITargetLowering::SITargetLowering(TargetMachine &TM) :
- AMDGPUTargetLowering(TM),
- TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())),
- TRI(TM.getRegisterInfo()) {
+ AMDGPUTargetLowering(TM) {
addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+
setTargetDAGCombine(ISD::SELECT_CC);
setTargetDAGCombine(ISD::SETCC);
setSchedulingPreference(Sched::RegPressure);
}
+//===----------------------------------------------------------------------===//
+// TargetLowering queries
+//===----------------------------------------------------------------------===//
+
+bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ bool *IsFast) const {
+  // XXX: This depends on the address space, and we may also want to revisit
+  // the alignment values we specify in the DataLayout.
+ return VT.bitsGT(MVT::i32);
+}
+
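+/// \brief Load a kernel argument that was passed in the input buffer.
+///
+/// The argument base pointer is the live-in SGPR0_SGPR1 pair; the value is
+/// zero-extend loaded from CONSTANT_ADDRESS at byte offset \p Offset.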
+SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT,
+ SDLoc DL, SDValue Chain,
+ unsigned Offset) const {
+ MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+ PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
+ AMDGPUAS::CONSTANT_ADDRESS);
+ EVT ArgVT = MVT::getIntegerVT(VT.getSizeInBits());
+ SDValue BasePtr = DAG.getCopyFromReg(Chain, DL,
+ MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
+ DAG.getConstant(Offset, MVT::i64));
+ return DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, Chain, Ptr,
+ MachinePointerInfo(UndefValue::get(PtrTy)),
+ VT, false, false, ArgVT.getSizeInBits() >> 3);
+}
+
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain,
CallingConv::ID CallConv,
}
// Second, split vertices into their elements
- if (Arg.VT.isVector()) {
+ if (Info->ShaderType != ShaderType::COMPUTE && Arg.VT.isVector()) {
ISD::InputArg NewArg = Arg;
NewArg.Flags.setSplit();
NewArg.VT = Arg.VT.getVectorElementType();
CCInfo.AllocateReg(AMDGPU::VGPR1);
}
+ // The pointer to the list of arguments is stored in SGPR0, SGPR1
+ if (Info->ShaderType == ShaderType::COMPUTE) {
+ CCInfo.AllocateReg(AMDGPU::SGPR0);
+ CCInfo.AllocateReg(AMDGPU::SGPR1);
+ MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
+ }
+
AnalyzeFormalArguments(CCInfo, Splits);
for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
}
CCValAssign &VA = ArgLocs[ArgIdx++];
+ EVT VT = VA.getLocVT();
+
+ if (VA.isMemLoc()) {
+      // The first 36 bytes of the input buffer contain information about
+      // the thread group and global sizes.
+ SDValue Arg = LowerParameter(DAG, VT, DL, DAG.getRoot(),
+ 36 + VA.getLocMemOffset());
+ InVals.push_back(Arg);
+ continue;
+ }
assert(VA.isRegLoc() && "Parameter must be in a register!");
unsigned Reg = VA.getLocReg();
- MVT VT = VA.getLocVT();
if (VT == MVT::i64) {
// For now, assume it is a pointer
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case AMDGPU::BRANCH: return BB;
case AMDGPU::SI_ADDR64_RSRC: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
unsigned SuperReg = MI->getOperand(0).getReg();
unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntrinsicID =
+ cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+    // XXX: Hardcoded; we only use two user SGPRs, to store the pointer to
+    // the parameters.
+ unsigned NumUserSGPRs = 2;
+ switch (IntrinsicID) {
+ default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
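+    // The following values live in the first 36 bytes of the input buffer
+    // (see LowerFormalArguments); each is a 32-bit field at a fixed offset.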
+ case Intrinsic::r600_read_ngroups_x:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 0);
+ case Intrinsic::r600_read_ngroups_y:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 4);
+ case Intrinsic::r600_read_ngroups_z:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 8);
+ case Intrinsic::r600_read_global_size_x:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 12);
+ case Intrinsic::r600_read_global_size_y:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 16);
+ case Intrinsic::r600_read_global_size_z:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 20);
+ case Intrinsic::r600_read_local_size_x:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 24);
+ case Intrinsic::r600_read_local_size_y:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 28);
+ case Intrinsic::r600_read_local_size_z:
+ return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 32);
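+    // Workgroup IDs are passed in the SGPRs immediately following the user
+    // SGPRs.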
+ case Intrinsic::r600_read_tgid_x:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
+ case Intrinsic::r600_read_tgid_y:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT);
+ case Intrinsic::r600_read_tgid_z:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT);
+ case Intrinsic::r600_read_tidig_x:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR0, VT);
+ case Intrinsic::r600_read_tidig_y:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR1, VT);
+ case Intrinsic::r600_read_tidig_z:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR2, VT);
+    }
+ }
}
return SDValue();
}
return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}
+SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ if (VT != MVT::i64) {
+ return SDValue();
+ }
+
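+  // Build the high half by replicating the sign bit of the low 32 bits
+  // (arithmetic shift right by 31), then pair the two halves into an i64.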
+ SDValue Hi = DAG.getNode(ISD::SRA, DL, MVT::i32, Op.getOperand(0),
+ DAG.getConstant(31, MVT::i32));
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi);
+}
+
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
bool &ScalarSlotUsed) const {
MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
return false;
SDNode *Node = Op.getNode();
const TargetRegisterClass *OpClass;
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode());
int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
- if (OpClassID == -1)
- OpClass = getRegClassFor(Op.getSimpleValueType());
- else
+ if (OpClassID == -1) {
+ switch (MN->getMachineOpcode()) {
+ case AMDGPU::REG_SEQUENCE:
+ // Operand 0 is the register class id for REG_SEQUENCE instructions.
+ OpClass = TRI->getRegClass(
+ cast<ConstantSDNode>(MN->getOperand(0))->getZExtValue());
+ break;
+ default:
+ OpClass = getRegClassFor(Op.getSimpleValueType());
+ break;
+ }
+ } else {
OpClass = TRI->getRegClass(OpClassID);
+ }
} else if (Node->getOpcode() == ISD::CopyFromReg) {
RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode());
Operand = SDValue(Node, 0);
}
+/// \returns true if \p Node's operands are different from the SDValue list
+/// \p Ops
+static bool isNodeChanged(const SDNode *Node, const std::vector<SDValue> &Ops) {
+ for (unsigned i = 0, e = Node->getNumOperands(); i < e; ++i) {
+ if (Ops[i].getNode() != Node->getOperand(i).getNode()) {
+ return true;
+ }
+ }
+ return false;
+}
+
/// \brief Try to fold the Node's operands into the Node
SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
SelectionDAG &DAG) const {
// Original encoding (either e32 or e64)
int Opcode = Node->getMachineOpcode();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
const MCInstrDesc *Desc = &TII->get(Opcode);
unsigned NumDefs = Desc->getNumDefs();
for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
Ops.push_back(Node->getOperand(i));
+  // Nodes that have a glue result are not CSE'd by getMachineNode(), so in
+  // this case a brand new node will always be created, even if the operands
+  // are the same as before. So, manually check if anything has been changed.
+ if (Desc->Opcode == Opcode && !isNodeChanged(Node, Ops)) {
+ return Node;
+ }
+
// Create a complete new instruction
return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops);
}
/// \brief Fold the instructions after selecting them
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
+ Node = AdjustRegClass(Node, DAG);
if (AMDGPU::isMIMG(Node->getMachineOpcode()) != -1)
adjustWritemask(Node, DAG);
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
MRI.setRegClass(VReg, RC);
}
+
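+/// \brief Rewrite nodes that were selected with the wrong register class.
+///
+/// Non-i128 REG_SEQUENCE nodes are rewritten to produce a 64-bit VGPR pair,
+/// and SMRD loads whose pointer does not fit in SGPRs are replaced with the
+/// equivalent MUBUF (ADDR64) loads.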
+MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
+ SelectionDAG &DAG) const {
+
+ SDLoc DL(N);
+ unsigned NewOpcode = N->getMachineOpcode();
+
+ switch (N->getMachineOpcode()) {
+ default: return N;
+ case AMDGPU::REG_SEQUENCE: {
+    // Values of type MVT::i128 only use SGPRs, so i128 REG_SEQUENCEs don't
+    // need to be rewritten.
+ if (N->getValueType(0) == MVT::i128) {
+ return N;
+ }
+ const SDValue Ops[] = {
+ DAG.getTargetConstant(AMDGPU::VReg_64RegClassID, MVT::i32),
+      N->getOperand(1), N->getOperand(2),
+ N->getOperand(3), N->getOperand(4)
+ };
+ return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::i64, Ops);
+ }
+
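+  // SMRD loads need their pointer in SGPRs. Each fall-through case below
+  // sets NewOpcode to the matching MUBUF opcode only if no earlier case has
+  // already changed it.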
+ case AMDGPU::S_LOAD_DWORD_IMM:
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
+ // Fall-through
+ case AMDGPU::S_LOAD_DWORDX2_SGPR:
+ if (NewOpcode == N->getMachineOpcode()) {
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
+ }
+ // Fall-through
+ case AMDGPU::S_LOAD_DWORDX4_IMM:
+ case AMDGPU::S_LOAD_DWORDX4_SGPR: {
+ if (NewOpcode == N->getMachineOpcode()) {
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
+ }
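+    // Keep the original SMRD load if its pointer already fits in SGPRs.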
+ if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) {
+ return N;
+ }
+ ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1));
+ SDValue Ops[] = {
+ SDValue(DAG.getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::i128,
+ DAG.getConstant(0, MVT::i64)), 0),
+ N->getOperand(0),
+ DAG.getConstant(Offset->getSExtValue() << 2, MVT::i32)
+ };
+ return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops);
+ }
+ }
+}
+
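+/// \brief Like the base class implementation, but wrap the returned live-in
+/// register in a CopyFromReg anchored at the entry node, so the value can be
+/// used directly.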
+SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
+ const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const {
+ SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);
+
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
+ cast<RegisterSDNode>(VReg)->getReg(), VT);
+}