//
// The LLVM Compiler Infrastructure
//
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
if (CE->getOpcode() == Instruction::GetElementPtr) {
// Cannot compute this if the element type of the pointer is missing size
// info.
- if (!cast<PointerType>(CE->getOperand(0)->getType())->getElementType()->isSized())
+ if (!cast<PointerType>(CE->getOperand(0)->getType())
+ ->getElementType()->isSized())
return false;
// If the base isn't a global+constant, we aren't either.
// Otherwise, add any offset that our operands provide.
gep_type_iterator GTI = gep_type_begin(CE);
- for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i, ++GTI) {
- ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(i));
+ for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
+ i != e; ++i, ++GTI) {
+ ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
if (CI->getZExtValue() == 0) continue; // Not adding anything.
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
} else {
const SequentialType *SQT = cast<SequentialType>(*GTI);
- Offset += TD.getTypeSize(SQT->getElementType())*CI->getSExtValue();
+ Offset += TD.getABITypeSize(SQT->getElementType())*CI->getSExtValue();
}
}
return true;
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
-static Constant *SymbolicallyEvaluateGEP(Constant** Ops, unsigned NumOps,
+static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
const Type *ResultTy,
const TargetData *TD) {
Constant *Ptr = Ops[0];
- if (!cast<PointerType>(Ptr->getType())->getElementType()->isSized())
+ if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
return 0;
- if (TD && Ptr->isNullValue()) {
- // If this is a constant expr gep that is effectively computing an
- // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
- bool isFoldableGEP = true;
- for (unsigned i = 1; i != NumOps; ++i)
- if (!isa<ConstantInt>(Ops[i])) {
- isFoldableGEP = false;
- break;
+ uint64_t BasePtr = 0;
+ if (!Ptr->isNullValue()) {
+ // If this is an inttoptr from a constant int, we can fold it as the base;
+ // otherwise we can't.
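+ // e.g. a GEP whose base is "inttoptr (i64 256 to i8*)" is folded below with
+ // BasePtr = 256 (illustrative only; any constant integer base works).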
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ if (CE->getOpcode() == Instruction::IntToPtr)
+ if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
+ BasePtr = Base->getZExtValue();
+
+ if (BasePtr == 0)
+ return 0;
+ }
+
+ // If this is a constant expr gep that is effectively computing an
+ // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
+ for (unsigned i = 1; i != NumOps; ++i)
+ if (!isa<ConstantInt>(Ops[i]))
+ return 0;
+
+ uint64_t Offset = TD->getIndexedOffset(Ptr->getType(),
+ (Value**)Ops+1, NumOps-1);
+ Constant *C = ConstantInt::get(TD->getIntPtrType(), Offset+BasePtr);
+ return ConstantExpr::getIntToPtr(C, ResultTy);
+}
+
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// TargetData. Return 0 if unfoldable.
+static Constant *FoldBitCast(Constant *C, const Type *DestTy,
+ const TargetData &TD) {
+ // If this is a bitcast from constant vector -> vector, fold it.
+ if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
+ if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
+ // If the element counts match, VMCore can fold it.
+ unsigned NumDstElt = DestVTy->getNumElements();
+ unsigned NumSrcElt = CV->getNumOperands();
+ if (NumDstElt == NumSrcElt)
+ return 0;
+
+ const Type *SrcEltTy = CV->getType()->getElementType();
+ const Type *DstEltTy = DestVTy->getElementType();
+
+ // Otherwise, we're changing the number of elements in a vector, which
+ // requires endianness information to do the right thing. For example,
+ // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
+ // folds to (little endian):
+ // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
+ // and to (big endian):
+ // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+
+ // First things first: we only want to think about integers here, so if
+ // we have something in FP form, recast it as an integer.
+ if (DstEltTy->isFloatingPoint()) {
+ // Fold to a vector of integers with the same size as our FP type.
+ unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
+ const Type *DestIVTy = VectorType::get(IntegerType::get(FPWidth),
+ NumDstElt);
+ // Recursively handle this integer conversion, if possible.
+ C = FoldBitCast(C, DestIVTy, TD);
+ if (!C) return 0;
+
+ // Finally, VMCore can handle this now that #elts line up.
+ return ConstantExpr::getBitCast(C, DestTy);
+ }
+
+ // Okay, we know the destination is integer. If the input is FP, convert
+ // it to integer first.
+ if (SrcEltTy->isFloatingPoint()) {
+ unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
+ const Type *SrcIVTy = VectorType::get(IntegerType::get(FPWidth),
+ NumSrcElt);
+ // Ask VMCore to do the conversion now that #elts line up.
+ C = ConstantExpr::getBitCast(C, SrcIVTy);
+ CV = dyn_cast<ConstantVector>(C);
+ if (!CV) return 0; // If VMCore wasn't able to fold it, bail out.
+ }
+
+ // Now we know that the input and output vectors are both integer vectors
+ // of the same size, and that their element counts differ. Do the
+ // conversion here, which depends on whether the input or output has
+ // more elements.
+ bool isLittleEndian = TD.isLittleEndian();
+
+ SmallVector<Constant*, 32> Result;
+ if (NumDstElt < NumSrcElt) {
+ // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
+ Constant *Zero = Constant::getNullValue(DstEltTy);
+ unsigned Ratio = NumSrcElt/NumDstElt;
+ unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
+ unsigned SrcElt = 0;
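+ // e.g. for <4 x i32> -> <2 x i64> on a little-endian target, the result is
+ // dst[0] = src[0] | (src[1] << 32) and dst[1] = src[2] | (src[3] << 32).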
+ for (unsigned i = 0; i != NumDstElt; ++i) {
+ // Build each element of the result.
+ Constant *Elt = Zero;
+ unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
+ for (unsigned j = 0; j != Ratio; ++j) {
+ Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
+ if (!Src) return 0; // Reject constantexpr elements.
+
+ // Zero extend the element to the right size.
+ Src = ConstantExpr::getZExt(Src, Elt->getType());
+
+ // Shift it to the right place, depending on endianness.
+ Src = ConstantExpr::getShl(Src,
+ ConstantInt::get(Src->getType(), ShiftAmt));
+ ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
+
+ // Mix it in.
+ Elt = ConstantExpr::getOr(Elt, Src);
+ }
+ Result.push_back(Elt);
+ }
+ } else {
+ // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
+ unsigned Ratio = NumDstElt/NumSrcElt;
+ unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+
+ // Loop over each source value, expanding into multiple results.
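+ // e.g. for <2 x i64> -> <4 x i32> on a little-endian target, src[0] expands
+ // to dst[0] = trunc(src[0]) and dst[1] = trunc(src[0] >> 32), and so on.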
+ for (unsigned i = 0; i != NumSrcElt; ++i) {
+ Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
+ if (!Src) return 0; // Reject constantexpr elements.
+
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
+ for (unsigned j = 0; j != Ratio; ++j) {
+ // Shift the piece of the value into the right place, depending on
+ // endianness.
+ Constant *Elt = ConstantExpr::getLShr(Src,
+ ConstantInt::get(Src->getType(), ShiftAmt));
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
+
+ // Truncate and remember this piece.
+ Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
+ }
+ }
}
- if (isFoldableGEP) {
- uint64_t Offset = TD->getIndexedOffset(Ptr->getType(),
- (Value**)Ops+1, NumOps-1);
- Constant *C = ConstantInt::get(TD->getIntPtrType(), Offset);
- return ConstantExpr::getIntToPtr(C, ResultTy);
+
+ return ConstantVector::get(&Result[0], Result.size());
}
}
- // Scan the operand list, checking to see if they are all constants, if so,
- // hand off to ConstantFoldInstOperands.
+ // Scan the operand list, checking to see if they are all constants; if so,
+ // hand off to ConstantFoldCompareInstOperands or ConstantFoldInstOperands.
SmallVector<Constant*, 8> Ops;
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- if (Constant *Op = dyn_cast<Constant>(I->getOperand(i)))
+ for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
+ if (Constant *Op = dyn_cast<Constant>(*i))
Ops.push_back(Op);
else
return 0; // All operands not constant!
- return ConstantFoldInstOperands(I, &Ops[0], Ops.size(), TD);
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ return ConstantFoldCompareInstOperands(CI->getPredicate(),
+ &Ops[0], Ops.size(), TD);
+ else
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
+ &Ops[0], Ops.size(), TD);
+}
+
+/// ConstantFoldConstantExpression - Attempt to fold the constant expression
+/// using the specified TargetData.  If successful, the constant result is
+/// returned; if not, null is returned.
+Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
+ const TargetData *TD) {
+ assert(TD && "ConstantFoldConstantExpression requires a valid TargetData.");
+
+ SmallVector<Constant*, 8> Ops;
+ for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
+ Ops.push_back(cast<Constant>(*i));
+
+ if (CE->isCompare())
+ return ConstantFoldCompareInstOperands(CE->getPredicate(),
+ &Ops[0], Ops.size(), TD);
+ else
+ return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(),
+ &Ops[0], Ops.size(), TD);
}
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
-Constant *llvm::ConstantFoldInstOperands(const Instruction* I,
- Constant** Ops, unsigned NumOps,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
+ Constant* const* Ops, unsigned NumOps,
const TargetData *TD) {
- unsigned Opc = I->getOpcode();
- const Type *DestTy = I->getType();
-
// Handle easy binops first.
- if (isa<BinaryOperator>(I)) {
+ if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
- if (Constant *C = SymbolicallyEvaluateBinop(I->getOpcode(), Ops[0],
- Ops[1], TD))
+ if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;
- return ConstantExpr::get(Opc, Ops[0], Ops[1]);
+ return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
- switch (Opc) {
+ switch (Opcode) {
default: return 0;
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops[0]))
return 0;
case Instruction::ICmp:
case Instruction::FCmp:
- return ConstantExpr::getCompare(cast<CmpInst>(I)->getPredicate(), Ops[0],
- Ops[1]);
+ assert(0 &&"This function is invalid for compares: no predicate specified");
case Instruction::PtrToInt:
- // If the input is a inttoptr, eliminate the pair. This requires knowing
+ // If the input is an inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
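+ // e.g. "ptrtoint (inttoptr (i64 X to i8*)) to i32" becomes X masked down to
+ // the pointer width and then truncated or extended to i32.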
TD->getPointerSizeInBits()));
Input = ConstantExpr::getAnd(Input, Mask);
// Do a zext or trunc to get to the dest size.
- return ConstantExpr::getIntegerCast(Input, I->getType(), false);
+ return ConstantExpr::getIntegerCast(Input, DestTy, false);
}
}
- // FALL THROUGH.
+ return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
case Instruction::IntToPtr:
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
+ return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
case Instruction::BitCast:
- return ConstantExpr::getCast(Opc, Ops[0], DestTy);
+ if (TD)
+ if (Constant *C = FoldBitCast(Ops[0], DestTy, *TD))
+ return C;
+ return ConstantExpr::getBitCast(Ops[0], DestTy);
case Instruction::Select:
return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
case Instruction::ExtractElement:
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
- if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, I->getType(), TD))
+ if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD))
return C;
return ConstantExpr::getGetElementPtr(Ops[0], Ops+1, NumOps-1);
}
}
+/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
+/// instruction (icmp/fcmp) with the specified operands. If it fails, it
+/// returns a constant expression of the specified operands.
+///
+Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
+ Constant*const * Ops,
+ unsigned NumOps,
+ const TargetData *TD) {
+ // fold: icmp (inttoptr x), null -> icmp x, 0
+ // fold: icmp (ptrtoint x), 0 -> icmp x, null
+ // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
+ // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
+ //
+ // ConstantExpr::getCompare cannot do this, because it doesn't have TD
+ // around to know if bit truncation is happening.
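+ // For example, with 32-bit pointers, "icmp eq (inttoptr (i64 4294967296 to
+ // i8*)), null" becomes "icmp eq i32 0, i32 0" once the i64 is truncated to
+ // IntPtrTy, and then folds to true.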
+ if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops[0])) {
+ if (TD && Ops[1]->isNullValue()) {
+ const Type *IntPtrTy = TD->getIntPtrType();
+ if (CE0->getOpcode() == Instruction::IntToPtr) {
+ // Convert the integer value to the right size to ensure we get the
+ // proper extension or truncation.
+ Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
+ IntPtrTy, false);
+ Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
+ return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
+ }
+
+ // Only do this transformation if the int is IntPtrTy in size, otherwise
+ // there is a truncation or extension that we aren't modeling.
+ if (CE0->getOpcode() == Instruction::PtrToInt &&
+ CE0->getType() == IntPtrTy) {
+ Constant *C = CE0->getOperand(0);
+ Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
+ // FIXME!
+ return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
+ }
+ }
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops[1])) {
+ if (TD && CE0->getOpcode() == CE1->getOpcode()) {
+ const Type *IntPtrTy = TD->getIntPtrType();
+
+ if (CE0->getOpcode() == Instruction::IntToPtr) {
+ // Convert the integer value to the right size to ensure we get the
+ // proper extension or truncation.
+ Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
+ IntPtrTy, false);
+ Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
+ IntPtrTy, false);
+ Constant *NewOps[] = { C0, C1 };
+ return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
+ }
+
+ // Only do this transformation if the int is IntPtrTy in size, otherwise
+ // there is a truncation or extension that we aren't modeling.
+ if ((CE0->getOpcode() == Instruction::PtrToInt &&
+ CE0->getType() == IntPtrTy &&
+ CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) {
+ Constant *NewOps[] = {
+ CE0->getOperand(0), CE1->getOperand(0)
+ };
+ return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
+ }
+ }
+ }
+ }
+ return ConstantExpr::getCompare(Predicate, Ops[0], Ops[1]);
+}
+
+
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
/// canConstantFoldCallTo - Return true if its even possible to fold a call to
/// the specified function.
bool
-llvm::canConstantFoldCallTo(Function *F) {
+llvm::canConstantFoldCallTo(const Function *F) {
switch (F->getIntrinsicID()) {
- case Intrinsic::sqrt_f32:
- case Intrinsic::sqrt_f64:
- case Intrinsic::powi_f32:
- case Intrinsic::powi_f64:
+ case Intrinsic::sqrt:
+ case Intrinsic::powi:
case Intrinsic::bswap:
case Intrinsic::ctpop:
case Intrinsic::ctlz:
if (Len == 3)
return !strcmp(Str, "sin");
if (Len == 4)
- return !strcmp(Str, "sinh") || !strcmp(Str, "sqrt");
+ return !strcmp(Str, "sinh") || !strcmp(Str, "sqrt") ||
+ !strcmp(Str, "sinf");
if (Len == 5)
return !strcmp(Str, "sqrtf");
return false;
const Type *Ty) {
errno = 0;
V = NativeFP(V);
- if (errno == 0) {
- if (Ty==Type::FloatTy)
- return ConstantFP::get(Ty, APFloat((float)V));
- else if (Ty==Type::DoubleTy)
- return ConstantFP::get(Ty, APFloat(V));
- else
- assert(0);
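+ // If the native call reported an error (e.g. a typical libm sets errno to
+ // EDOM for log(-1.0)), refuse to fold.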
+ if (errno != 0) {
+ errno = 0;
+ return 0;
}
- errno = 0;
- return 0;
+
+ if (Ty == Type::FloatTy)
+ return ConstantFP::get(APFloat((float)V));
+ if (Ty == Type::DoubleTy)
+ return ConstantFP::get(APFloat(V));
+ assert(0 && "Can only constant fold float/double");
+ return 0; // dummy return to suppress warning
}
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
const Type *Ty) {
errno = 0;
V = NativeFP(V, W);
- if (errno == 0) {
- if (Ty==Type::FloatTy)
- return ConstantFP::get(Ty, APFloat((float)V));
- else if (Ty==Type::DoubleTy)
- return ConstantFP::get(Ty, APFloat(V));
- else
- assert(0);
+ if (errno != 0) {
+ errno = 0;
+ return 0;
}
- errno = 0;
- return 0;
+
+ if (Ty == Type::FloatTy)
+ return ConstantFP::get(APFloat((float)V));
+ if (Ty == Type::DoubleTy)
+ return ConstantFP::get(APFloat(V));
+ assert(0 && "Can only constant fold float/double");
+ return 0; // dummy return to suppress warning
}
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
-llvm::ConstantFoldCall(Function *F, Constant** Operands, unsigned NumOperands) {
+llvm::ConstantFoldCall(Function *F,
+ Constant* const* Operands, unsigned NumOperands) {
const ValueName *NameVal = F->getValueName();
if (NameVal == 0) return 0;
const char *Str = NameVal->getKeyData();
return ConstantFoldFP(cos, V, Ty);
else if (Len == 4 && !strcmp(Str, "cosh"))
return ConstantFoldFP(cosh, V, Ty);
+ else if (Len == 4 && !strcmp(Str, "cosf"))
+ return ConstantFoldFP(cos, V, Ty);
break;
case 'e':
if (Len == 3 && !strcmp(Str, "exp"))
if (V >= -0.0)
return ConstantFoldFP(sqrt, V, Ty);
else // Undefined
- return ConstantFP::get(Ty, Ty==Type::FloatTy ? APFloat(0.0f) :
- APFloat(0.0));
+ return Constant::getNullValue(Ty);
}
break;
case 's':
return ConstantFoldFP(sqrt, V, Ty);
else if (Len == 5 && !strcmp(Str, "sqrtf") && V >= 0)
return ConstantFoldFP(sqrt, V, Ty);
+ else if (Len == 4 && !strcmp(Str, "sinf"))
+ return ConstantFoldFP(sin, V, Ty);
break;
case 't':
if (Len == 3 && !strcmp(Str, "tan"))
break;
}
} else if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
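+ // e.g. llvm.bswap.i32(0x12345678) folds to 0x78563412 and llvm.ctpop.i32(7)
+ // folds to 3.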
- if (Len > 11 && !memcmp(Str, "llvm.bswap", 10)) {
+ if (Len > 11 && !memcmp(Str, "llvm.bswap", 10))
return ConstantInt::get(Op->getValue().byteSwap());
- } else if (Len > 11 && !memcmp(Str, "llvm.ctpop", 10)) {
- uint64_t ctpop = Op->getValue().countPopulation();
- return ConstantInt::get(Ty, ctpop);
- } else if (Len > 10 && !memcmp(Str, "llvm.cttz", 9)) {
- uint64_t cttz = Op->getValue().countTrailingZeros();
- return ConstantInt::get(Ty, cttz);
- } else if (Len > 10 && !memcmp(Str, "llvm.ctlz", 9)) {
- uint64_t ctlz = Op->getValue().countLeadingZeros();
- return ConstantInt::get(Ty, ctlz);
- }
+ else if (Len > 11 && !memcmp(Str, "llvm.ctpop", 10))
+ return ConstantInt::get(Ty, Op->getValue().countPopulation());
+ else if (Len > 10 && !memcmp(Str, "llvm.cttz", 9))
+ return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
+ else if (Len > 10 && !memcmp(Str, "llvm.ctlz", 9))
+ return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
}
} else if (NumOperands == 2) {
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
+ if (Ty!=Type::FloatTy && Ty!=Type::DoubleTy)
+ return 0;
double Op1V = Ty==Type::FloatTy ?
(double)Op1->getValueAPF().convertToFloat():
Op1->getValueAPF().convertToDouble();
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
- if (Ty!=Type::FloatTy && Ty!=Type::DoubleTy)
- return 0;
double Op2V = Ty==Type::FloatTy ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
}
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
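+ // e.g. llvm.powi.f32(2.0, 10) folds to 1024.0.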
if (!strcmp(Str, "llvm.powi.f32")) {
- return ConstantFP::get(Ty, APFloat((float)std::pow((float)Op1V,
- (int)Op2C->getZExtValue())));
+ return ConstantFP::get(APFloat((float)std::pow((float)Op1V,
+ (int)Op2C->getZExtValue())));
} else if (!strcmp(Str, "llvm.powi.f64")) {
- return ConstantFP::get(Ty, APFloat((double)std::pow((double)Op1V,
- (int)Op2C->getZExtValue())));
+ return ConstantFP::get(APFloat((double)std::pow((double)Op1V,
+ (int)Op2C->getZExtValue())));
}
}
}