#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrAnnot.h"
#include "llvm/CodeGen/MachineCodeForInstruction.h"
-#include "llvm/CodeGen/MachineCodeForMethod.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/InstrForest.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MachineRegInfo.h"
+#include "llvm/Target/MachineInstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
// FoldConstantIndices that does the actual folding.
//---------------------------------------------------------------------------
+
+// Return true iff the given index value is the constant zero.
+// Relies on null constants of a given type being uniqued, so pointer
+// identity with the type's null constant suffices (the original
+// comparison depends on the same property).
+inline bool
+IsZero(Value* idx)
+{
+  Value* zeroOfSameType = ConstantSInt::getNullValue(idx->getType());
+  return zeroOfSameType == idx;
+}
+
+//---------------------------------------------------------------------------
+// Function: FoldGetElemChain
+//
+// Purpose:
+//   Fold a chain of getElementPtr instructions with all-constant indices
+//   into the single index vector chainIdxVec, marking each folded node so
+//   no code is generated for it.  Returns the pointer operand of the
+//   earliest GEP folded in, or NULL if no instruction was folded.
+//   lastInstHasLeadingNonZero tells whether the GEP already processed by
+//   the caller begins with a non-zero (array) index; folding then stops
+//   at any GEP whose element type is not sequential (see below).
+//---------------------------------------------------------------------------
 static Value*
-FoldGetElemChain(InstrTreeNode* ptrNode, vector<Value*>& chainIdxVec)
+FoldGetElemChain(InstrTreeNode* ptrNode, vector<Value*>& chainIdxVec,
+                 bool lastInstHasLeadingNonZero)
 {
   InstructionNode* gepNode = dyn_cast<InstructionNode>(ptrNode);
   GetElementPtrInst* gepInst =
   // Return NULL if we don't fold any instructions in.
   Value* ptrVal = NULL;
-  // Remember if the last instruction had a leading [0] index.
-  bool hasLeadingZero = false;
-
   // Now chase the chain of getElementInstr instructions, if any.
   // Check for any non-constant indices and stop there.
+  // Also, stop if the first index of child is a non-zero array index
+  // and the last index of the current node is a non-array index:
+  // in that case, a non-array declared type is being accessed as an array
+  // which is not type-safe, but could be legal.
   //
   InstrTreeNode* ptrChild = gepNode;
   while (ptrChild && (ptrChild->getOpLabel() == Instruction::GetElementPtr ||
     User::op_iterator lastIdx = gepInst->idx_end();
     bool allConstantOffsets = true;
+    // The first index of every GEP must be an array index.
+    assert((*firstIdx)->getType() == Type::LongTy &&
+           "INTERNAL ERROR: Structure index for a pointer type!");
+
+    // If the last instruction had a leading non-zero index, check if the
+    // current one references a sequential (i.e., indexable) type.
+    // If not, the code is not type-safe and we would create an illegal GEP
+    // by folding them, so don't fold any more instructions.
+    //
+    if (lastInstHasLeadingNonZero)
+      if (! isa<SequentialType>(gepInst->getType()->getElementType()))
+        break;              // cannot fold in any preceding getElementPtr instrs.
+
     // Check that all offsets are constant for this instruction
     for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
       allConstantOffsets = isa<ConstantInt>(*OI);
     { // Get pointer value out of ptrChild.
       ptrVal = gepInst->getPointerOperand();
-      // Check for a leading [0] index, if any. It will be discarded later.
-      hasLeadingZero = (*firstIdx ==
-                        Constant::getNullValue((*firstIdx)->getType()));
+      // Remember whether this GEP's leading index is NON-zero; a leading
+      // zero index is skipped (discarded) by the insert just below.
+      lastInstHasLeadingNonZero = ! IsZero(*firstIdx);
       // Insert its index vector at the start, skipping any leading [0]
       chainIdxVec.insert(chainIdxVec.begin(),
-                         firstIdx + hasLeadingZero, lastIdx);
+                         firstIdx + !lastInstHasLeadingNonZero, lastIdx);
       // Mark the folded node so no code is generated for it.
       ((InstructionNode*) ptrChild)->markFoldedIntoParent();
+
+      // Get the previous GEP instruction and continue trying to fold
+      ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
     }
-    else // cannot fold this getElementPtr instr. or any further ones
+    else // cannot fold this getElementPtr instr. or any preceding ones
       break;
-
-    ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
   }
   // If the first getElementPtr instruction had a leading [0], add it back.
   // Note that this instruction is the *last* one successfully folded above.
-  if (ptrVal && hasLeadingZero)
+  if (ptrVal && ! lastInstHasLeadingNonZero)
     chainIdxVec.insert(chainIdxVec.begin(), ConstantSInt::get(Type::LongTy,0));
   return ptrVal;
 }
+//---------------------------------------------------------------------------
+// Function: GetGEPInstArgs
+//
+// Purpose:
+//   Helper function for GetMemInstArgs that handles the final getElementPtr
+//   instruction used by (or same as) the memory operation.
+//   Extracts the indices of the current instruction and tries to fold in
+//   preceding ones if all indices of the current one are constant.
+//   Returns the pointer to use (the folded chain's base pointer if any
+//   preceding GEPs were folded, else this GEP's own pointer operand) and
+//   sets allConstantIndices to whether every index of gepNode is constant.
+//---------------------------------------------------------------------------
+
+Value*
+GetGEPInstArgs(InstructionNode* gepNode,
+               vector<Value*>& idxVec,
+               bool& allConstantIndices)
+{
+  allConstantIndices = true;
+  // cast<> asserts the instruction is a GetElementPtrInst, so gepI is
+  // guaranteed non-null below and needs no further null test.
+  GetElementPtrInst* gepI = cast<GetElementPtrInst>(gepNode->getInstruction());
+
+  // Default pointer is the one from the current instruction.
+  Value* ptrVal = gepI->getPointerOperand();
+  InstrTreeNode* ptrChild = gepNode->leftChild();
+
+  // Extract the index vector of the GEP instruction.
+  // If all indices are constant and first index is zero, try to fold
+  // in preceding GEPs with all constant indices.
+  for (User::op_iterator OI=gepI->idx_begin(), OE=gepI->idx_end();
+       allConstantIndices && OI != OE; ++OI)
+    if (! isa<Constant>(*OI))
+      allConstantIndices = false;   // note: this also terminates loop!
+
+  // If we have only constant indices, fold chains of constant indices
+  // in this and any preceding GetElemPtr instructions.
+  // NOTE(review): assumes the GEP has at least one index, else
+  // *gepI->idx_begin() dereferences the end iterator -- confirm callers
+  // never build an index-less GEP.
+  bool foldedGEPs = false;
+  bool leadingNonZeroIdx = ! IsZero(*gepI->idx_begin());
+  if (allConstantIndices)
+    if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx))
+      {
+        ptrVal = newPtr;
+        foldedGEPs = true;
+      }
+
+  // Append the index vector of the current instruction.
+  // Skip the leading [0] index if preceding GEPs were folded into this.
+  idxVec.insert(idxVec.end(),
+                gepI->idx_begin() + (foldedGEPs && !leadingNonZeroIdx),
+                gepI->idx_end());
+
+  return ptrVal;
+}
+
//---------------------------------------------------------------------------
// Function: GetMemInstArgs
//
// Returns true/false in allConstantIndices if all indices are/aren't const.
//---------------------------------------------------------------------------
-
-// Check for a constant (uint) 0.
-inline bool
-IsZero(Value* idx)
-{
- return (isa<ConstantInt>(idx) && cast<ConstantInt>(idx)->isNullValue());
-}
-
 Value*
-GetMemInstArgs(const InstructionNode* memInstrNode,
+GetMemInstArgs(InstructionNode* memInstrNode,
                vector<Value*>& idxVec,
                bool& allConstantIndices)
 {
+  // False until GetGEPInstArgs inspects an actual GEP below; a plain
+  // load/store through a scalar pointer reports no constant indices.
-  allConstantIndices = true;
+  allConstantIndices = false;
   Instruction* memInst = memInstrNode->getInstruction();
+  assert(idxVec.size() == 0 && "Need empty vector to return indices");
   // If there is a GetElemPtr instruction to fold in to this instr,
   // it must be in the left child for Load and GetElemPtr, and in the
   InstrTreeNode* ptrChild = (memInst->getOpcode() == Instruction::Store
                              ? memInstrNode->rightChild()
                              : memInstrNode->leftChild());
-
+
   // Default pointer is the one from the current instruction.
   Value* ptrVal = ptrChild->getValue();
-  // GEP is the only indexed memory instruction. gepI is used below.
-  GetElementPtrInst* gepI = dyn_cast<GetElementPtrInst>(memInst);
-
-  // If memInst is a GEP, check if all indices are constant for this instruction
-  if (gepI)
-    for (User::op_iterator OI=gepI->idx_begin(), OE=gepI->idx_end();
-         allConstantIndices && OI != OE; ++OI)
-      if (! isa<Constant>(*OI))
-        allConstantIndices = false; // note: this also terminates loop!
-
-  // If we have only constant indices, fold chains of constant indices
-  // in this and any preceding GetElemPtr instructions.
-  bool foldedGEPs = false;
-  if (allConstantIndices)
-    if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec))
-      {
-        ptrVal = newPtr;
-        foldedGEPs = true;
-        assert((!gepI || IsZero(*gepI->idx_begin())) && "1st index not 0");
-      }
-
-  // Append the index vector of the current instruction, if any.
-  // Skip the leading [0] index if preceding GEPs were folded into this.
-  if (gepI)
-    idxVec.insert(idxVec.end(), gepI->idx_begin() +foldedGEPs, gepI->idx_end());
+  // Find the "last" GetElemPtr instruction: this one or the immediate child.
+  // There will be none if this is a load or a store from a scalar pointer.
+  InstructionNode* gepNode = NULL;
+  if (isa<GetElementPtrInst>(memInst))
+    gepNode = memInstrNode;
+  else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal))
+    { // Child of load/store is a GEP and memInst is its only use.
+      // Use its indices and mark it as folded.
+      // NOTE(review): no use-count check is visible here, so "only use"
+      // is an assumption -- confirm the GEP has a single user before
+      // relying on markFoldedIntoParent() suppressing its code.
+      gepNode = cast<InstructionNode>(ptrChild);
+      gepNode->markFoldedIntoParent();
+    }
-  return ptrVal;
+  // If there are no indices, return the current pointer.
+  // Else extract the pointer from the GEP and fold the indices.
+  return (gepNode)? GetGEPInstArgs(gepNode, idxVec, allConstantIndices)
+                  : ptrVal;
 }
+
//------------------------------------------------------------------------
// Function Set2OperandsFromInstr
// Function Set3OperandsFromInstr
else if (CPV->getType()->isSigned())
intValue = cast<ConstantSInt>(CPV)->getValue();
else
- {
- assert(CPV->getType()->isUnsigned() && "Not pointer, bool, or integer?");
- uint64_t V = cast<ConstantUInt>(CPV)->getValue();
- if (V >= INT64_MAX) return MachineOperand::MO_VirtualRegister;
- intValue = (int64_t) V;
+ { // get the int value and sign-extend if original was less than 64 bits
+ intValue = (int64_t) cast<ConstantUInt>(CPV)->getValue();
+ switch(CPV->getType()->getPrimitiveID())
+ {
+ case Type::UByteTyID: intValue = (int64_t) (int8_t) intValue; break;
+ case Type::UShortTyID: intValue = (int64_t) (short) intValue; break;
+ case Type::UIntTyID: intValue = (int64_t) (int) intValue; break;
+ default: break;
+ }
}
return ChooseRegOrImmed(intValue, CPV->getType()->isSigned(),
// Skip the result position, preallocated machine registers, or operands
// that cannot be constants (CC regs or PC-relative displacements)
if (instrDesc.resultPos == (int) op ||
- mop.getOperandType() == MachineOperand::MO_MachineRegister ||
- mop.getOperandType() == MachineOperand::MO_CCRegister ||
- mop.getOperandType() == MachineOperand::MO_PCRelativeDisp)
+ mop.getType() == MachineOperand::MO_MachineRegister ||
+ mop.getType() == MachineOperand::MO_CCRegister ||
+ mop.getType() == MachineOperand::MO_PCRelativeDisp)
continue;
bool constantThatMustBeLoaded = false;
MachineOperand::MO_VirtualRegister;
// Operand may be a virtual register or a compile-time constant
- if (mop.getOperandType() == MachineOperand::MO_VirtualRegister)
+ if (mop.getType() == MachineOperand::MO_VirtualRegister)
{
assert(mop.getVRegValue() != NULL);
opValue = mop.getVRegValue();
}
else
{
- assert(mop.getOperandType() == MachineOperand::MO_SignExtendedImmed ||
- mop.getOperandType() == MachineOperand::MO_UnextendedImmed);
+ assert(mop.getType() == MachineOperand::MO_SignExtendedImmed ||
+ mop.getType() == MachineOperand::MO_UnextendedImmed);
- bool isSigned = (mop.getOperandType() ==
+ bool isSigned = (mop.getType() ==
MachineOperand::MO_SignExtendedImmed);
// Bit-selection flags indicate an instruction that is extracting
opCode, target, (immedPos == (int)op),
machineRegNum, immedValue);
- if (opType == mop.getOperandType())
+ if (opType == mop.getType())
continue; // no change: this is the most common case
if (opType == MachineOperand::MO_VirtualRegister)
{
constantThatMustBeLoaded = true;
opValue = isSigned
- ? ConstantSInt::get(Type::LongTy, immedValue)
- : ConstantUInt::get(Type::ULongTy, (uint64_t) immedValue);
+ ? (Value*)ConstantSInt::get(Type::LongTy, immedValue)
+ : (Value*)ConstantUInt::get(Type::ULongTy,(uint64_t)immedValue);
}
}