}
static std::pair<unsigned, bool>
-getX86SSECondtionCode(CmpInst::Predicate Predicate) {
+getX86SSEConditionCode(CmpInst::Predicate Predicate) {
unsigned CC;
bool NeedSwap = false;
X86::CondCode CC;
bool SwapArgs;
std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected conditon code.");
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
unsigned Opc = X86::getSETFromCond(CC);
if (SwapArgs)
Predicate = CmpInst::getInversePredicate(Predicate);
}
- // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/conditon
+ // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
// code check. Instead two branch instructions are required to check all
- // the flags. First we change the predicate to a supported conditon code,
+ // the flags. First we change the predicate to a supported condition code,
- // which will be the first branch. Later one we will emit the second
+ // which will be the first branch. Later on we will emit the second
// branch.
bool NeedExtraBranch = false;
bool SwapArgs;
unsigned BranchOpc;
std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected conditon code.");
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
BranchOpc = X86::GetCondBranchFromCond(CC);
if (SwapArgs)
bool NeedTest = true;
X86::CondCode CC = X86::COND_NE;
- // Optimize conditons coming from a compare if both instructions are in the
+ // Optimize conditions coming from a compare if both instructions are in the
// same basic block (values defined in other basic blocks may not have
// initialized registers).
const auto *CI = dyn_cast<CmpInst>(Cond);
/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
/// SSE instructions are available.
bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
- // Optimize conditons coming from a compare if both instructions are in the
+ // Optimize conditions coming from a compare if both instructions are in the
// same basic block (values defined in other basic blocks may not have
// initialized registers).
const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
unsigned CC;
bool NeedSwap;
- std::tie(CC, NeedSwap) = getX86SSECondtionCode(Predicate);
+ std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
if (CC > 7)
return false;
const Value *Cond = I->getOperand(0);
X86::CondCode CC = X86::COND_NE;
- // Optimize conditons coming from a compare if both instructions are in the
+ // Optimize conditions coming from a compare if both instructions are in the
// same basic block (values defined in other basic blocks may not have
// initialized registers).
const auto *CI = dyn_cast<CmpInst>(Cond);
if (X86FastEmitCMoveSelect(RetVT, I))
return true;
- // Try to use a sequence of SSE instructions to simulate a conditonal move.
+ // Try to use a sequence of SSE instructions to simulate a conditional move.
if (X86FastEmitSSESelect(RetVT, I))
return true;
if (!isTypeLegal(RetTy, VT))
return false;
- // Unfortunatelly we can't use FastEmit_r, because the AVX version of FSQRT
+ // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
static const unsigned SqrtOpc[2][2] = {
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
// This implements the basic lowering of the xalu with overflow intrinsics
- // into add/sub/mul folowed by either seto or setb.
+ // into add/sub/mul followed by either seto or setb.
const Function *Callee = I.getCalledFunction();
auto *Ty = cast<StructType>(Callee->getReturnType());
Type *RetTy = Ty->getTypeAtIndex(0U);
const Value *LHS = I.getArgOperand(0);
const Value *RHS = I.getArgOperand(1);
- // Canonicalize immediates to the RHS.
+ // Canonicalize immediate to the RHS.
if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
isCommutativeIntrinsic(I))
std::swap(LHS, RHS);