From e220c4b3d97bbdc9f6e8cf040942514612349c41 Mon Sep 17 00:00:00 2001 From: Dan Gohman Date: Fri, 18 Sep 2009 19:59:53 +0000 Subject: [PATCH] Add support for using the FLAGS result of or, xor, and and instructions on x86, to avoid explicit test instructions. A few existing tests changed due to arbitrary register allocation differences. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@82263 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 44 ++++- lib/Target/X86/X86ISelLowering.h | 2 +- lib/Target/X86/X86Instr64bit.td | 96 +++++++++ lib/Target/X86/X86InstrInfo.td | 228 ++++++++++++++++++++++ test/CodeGen/X86/2008-10-16-SpillerBug.ll | 2 +- test/CodeGen/X86/peep-test-3.ll | 88 +++++++++ test/CodeGen/X86/stack-color-with-reg.ll | 5 +- 7 files changed, 456 insertions(+), 9 deletions(-) create mode 100644 test/CodeGen/X86/peep-test-3.ll diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 11cc6789440..6154d2641c6 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5363,21 +5363,48 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, Opcode = X86ISD::ADD; NumOperands = 2; break; + case ISD::AND: { + // If the primary and result isn't used, don't bother using X86ISD::AND, + // because a TEST instruction will be better. + bool NonFlagUse = false; + for (SDNode::use_iterator UI = Op.getNode()->use_begin(), + UE = Op.getNode()->use_end(); UI != UE; ++UI) + if (UI->getOpcode() != ISD::BRCOND && + UI->getOpcode() != ISD::SELECT && + UI->getOpcode() != ISD::SETCC) { + NonFlagUse = true; + break; + } + if (!NonFlagUse) + break; + } + // FALL THROUGH case ISD::SUB: - // Due to the ISEL shortcoming noted above, be conservative if this sub is + case ISD::OR: + case ISD::XOR: + // Due to the ISEL shortcoming noted above, be conservative if this op is // likely to be selected as part of a load-modify-store instruction. for (SDNode::use_iterator UI = Op.getNode()->use_begin(), UE = Op.getNode()->use_end(); UI != UE; ++UI) if (UI->getOpcode() == ISD::STORE) goto default_case; - // Otherwise use a regular EFLAGS-setting sub. - Opcode = X86ISD::SUB; + // Otherwise use a regular EFLAGS-setting instruction. 
+ switch (Op.getNode()->getOpcode()) { + case ISD::SUB: Opcode = X86ISD::SUB; break; + case ISD::OR: Opcode = X86ISD::OR; break; + case ISD::XOR: Opcode = X86ISD::XOR; break; + case ISD::AND: Opcode = X86ISD::AND; break; + default: llvm_unreachable("unexpected operator!"); + } NumOperands = 2; break; case X86ISD::ADD: case X86ISD::SUB: case X86ISD::INC: case X86ISD::DEC: + case X86ISD::OR: + case X86ISD::XOR: + case X86ISD::AND: return SDValue(Op.getNode(), 1); default: default_case: @@ -5605,7 +5632,10 @@ static bool isX86LogicalCmp(SDValue Op) { Opc == X86ISD::SMUL || Opc == X86ISD::UMUL || Opc == X86ISD::INC || - Opc == X86ISD::DEC)) + Opc == X86ISD::DEC || + Opc == X86ISD::OR || + Opc == X86ISD::XOR || + Opc == X86ISD::AND)) return true; return false; @@ -7133,6 +7163,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::UMUL: return "X86ISD::UMUL"; case X86ISD::INC: return "X86ISD::INC"; case X86ISD::DEC: return "X86ISD::DEC"; + case X86ISD::OR: return "X86ISD::OR"; + case X86ISD::XOR: return "X86ISD::XOR"; + case X86ISD::AND: return "X86ISD::AND"; case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; case X86ISD::PTEST: return "X86ISD::PTEST"; case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; @@ -8094,6 +8127,9 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, case X86ISD::UMUL: case X86ISD::INC: case X86ISD::DEC: + case X86ISD::OR: + case X86ISD::XOR: + case X86ISD::AND: // These nodes' second result is a boolean. if (Op.getResNo() == 0) break; diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 021797e2210..beb9ec312b9 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -237,7 +237,7 @@ namespace llvm { // ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results. ADD, SUB, SMUL, UMUL, - INC, DEC, + INC, DEC, OR, XOR, AND, // MUL_IMM - X86 specific multiply by immediate. 
MUL_IMM, diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index dc5e1739d3e..ef19823a283 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -2082,6 +2082,102 @@ def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst), (implicit EFLAGS)), (DEC64m addr:$dst)>; +// Register-Register Logical Or with EFLAGS result +def : Pat<(parallel (X86or_flag GR64:$src1, GR64:$src2), + (implicit EFLAGS)), + (OR64rr GR64:$src1, GR64:$src2)>; + +// Register-Integer Logical Or with EFLAGS result +def : Pat<(parallel (X86or_flag GR64:$src1, i64immSExt8:$src2), + (implicit EFLAGS)), + (OR64ri8 GR64:$src1, i64immSExt8:$src2)>; +def : Pat<(parallel (X86or_flag GR64:$src1, i64immSExt32:$src2), + (implicit EFLAGS)), + (OR64ri32 GR64:$src1, i64immSExt32:$src2)>; + +// Register-Memory Logical Or with EFLAGS result +def : Pat<(parallel (X86or_flag GR64:$src1, (loadi64 addr:$src2)), + (implicit EFLAGS)), + (OR64rm GR64:$src1, addr:$src2)>; + +// Memory-Register Logical Or with EFLAGS result +def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), GR64:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR64mr addr:$dst, GR64:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), i64immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR64mi8 addr:$dst, i64immSExt8:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), i64immSExt32:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR64mi32 addr:$dst, i64immSExt32:$src2)>; + +// Register-Register Logical XOr with EFLAGS result +def : Pat<(parallel (X86xor_flag GR64:$src1, GR64:$src2), + (implicit EFLAGS)), + (XOR64rr GR64:$src1, GR64:$src2)>; + +// Register-Integer Logical XOr with EFLAGS result +def : Pat<(parallel (X86xor_flag GR64:$src1, i64immSExt8:$src2), + (implicit EFLAGS)), + (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>; +def : Pat<(parallel (X86xor_flag GR64:$src1, i64immSExt32:$src2), + (implicit EFLAGS)), + (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>; + +// Register-Memory Logical XOr with EFLAGS result +def : Pat<(parallel (X86xor_flag GR64:$src1, (loadi64 addr:$src2)), + (implicit EFLAGS)), + (XOR64rm GR64:$src1, addr:$src2)>; + +// Memory-Register Logical XOr with EFLAGS result +def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), GR64:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR64mr addr:$dst, GR64:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR64mi8 addr:$dst, i64immSExt8:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt32:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR64mi32 addr:$dst, i64immSExt32:$src2)>; + +// Register-Register Logical And with EFLAGS result +def : Pat<(parallel (X86and_flag GR64:$src1, GR64:$src2), + (implicit EFLAGS)), + (AND64rr GR64:$src1, GR64:$src2)>; + +// Register-Integer Logical And with EFLAGS result +def : Pat<(parallel (X86and_flag GR64:$src1, i64immSExt8:$src2), + (implicit EFLAGS)), + (AND64ri8 GR64:$src1, i64immSExt8:$src2)>; +def : Pat<(parallel (X86and_flag GR64:$src1, i64immSExt32:$src2), + (implicit EFLAGS)), + (AND64ri32 GR64:$src1, i64immSExt32:$src2)>; + +// Register-Memory Logical And with EFLAGS result +def : Pat<(parallel (X86and_flag GR64:$src1, (loadi64 addr:$src2)), + (implicit EFLAGS)), + (AND64rm GR64:$src1, addr:$src2)>; + +// Memory-Register Logical And with EFLAGS result +def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), GR64:$src2), + addr:$dst), 
+ (implicit EFLAGS)), + (AND64mr addr:$dst, GR64:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND64mi8 addr:$dst, i64immSExt8:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt32:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND64mi32 addr:$dst, i64immSExt32:$src2)>; + //===----------------------------------------------------------------------===// // X86-64 SSE Instructions //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 21730df618a..101b7a8f3ca 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -162,6 +162,9 @@ def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags>; def X86umul_flag : SDNode<"X86ISD::UMUL", SDTUnaryArithWithFlags>; def X86inc_flag : SDNode<"X86ISD::INC", SDTUnaryArithWithFlags>; def X86dec_flag : SDNode<"X86ISD::DEC", SDTUnaryArithWithFlags>; +def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags>; +def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags>; +def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags>; def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>; @@ -4418,6 +4421,231 @@ def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst), (implicit EFLAGS)), (DEC32m addr:$dst)>, Requires<[In32BitMode]>; +// Register-Register Or with EFLAGS result +def : Pat<(parallel (X86or_flag GR8:$src1, GR8:$src2), + (implicit EFLAGS)), + (OR8rr GR8:$src1, GR8:$src2)>; +def : Pat<(parallel (X86or_flag GR16:$src1, GR16:$src2), + (implicit EFLAGS)), + (OR16rr GR16:$src1, GR16:$src2)>; +def : Pat<(parallel (X86or_flag GR32:$src1, GR32:$src2), + (implicit EFLAGS)), + (OR32rr GR32:$src1, GR32:$src2)>; + +// Register-Memory Or with EFLAGS result +def : Pat<(parallel (X86or_flag GR8:$src1, (loadi8 addr:$src2)), + (implicit EFLAGS)), + (OR8rm GR8:$src1, addr:$src2)>; +def : Pat<(parallel (X86or_flag GR16:$src1, (loadi16 addr:$src2)), + (implicit EFLAGS)), + (OR16rm GR16:$src1, addr:$src2)>; +def : Pat<(parallel (X86or_flag GR32:$src1, (loadi32 addr:$src2)), + (implicit EFLAGS)), + (OR32rm GR32:$src1, addr:$src2)>; + +// Register-Integer Or with EFLAGS result +def : Pat<(parallel (X86or_flag GR8:$src1, imm:$src2), + (implicit EFLAGS)), + (OR8ri GR8:$src1, imm:$src2)>; +def : Pat<(parallel (X86or_flag GR16:$src1, imm:$src2), + (implicit EFLAGS)), + (OR16ri GR16:$src1, imm:$src2)>; +def : Pat<(parallel (X86or_flag GR32:$src1, imm:$src2), + (implicit EFLAGS)), + (OR32ri GR32:$src1, imm:$src2)>; +def : Pat<(parallel (X86or_flag GR16:$src1, i16immSExt8:$src2), + (implicit EFLAGS)), + (OR16ri8 GR16:$src1, i16immSExt8:$src2)>; +def : Pat<(parallel (X86or_flag GR32:$src1, i32immSExt8:$src2), + (implicit EFLAGS)), + (OR32ri8 GR32:$src1, i32immSExt8:$src2)>; + +// Memory-Register Or with EFLAGS result +def : Pat<(parallel (store (X86or_flag (loadi8 addr:$dst), GR8:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR8mr addr:$dst, GR8:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi16 addr:$dst), GR16:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR16mr addr:$dst, GR16:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi32 addr:$dst), GR32:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR32mr addr:$dst, GR32:$src2)>; + +// Memory-Integer Or with EFLAGS result +def : Pat<(parallel (store (X86or_flag (loadi8 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + 
(OR8mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi16 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR16mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi32 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR32mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi16 addr:$dst), i16immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR16mi8 addr:$dst, i16immSExt8:$src2)>; +def : Pat<(parallel (store (X86or_flag (loadi32 addr:$dst), i32immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (OR32mi8 addr:$dst, i32immSExt8:$src2)>; + +// Register-Register XOr with EFLAGS result +def : Pat<(parallel (X86xor_flag GR8:$src1, GR8:$src2), + (implicit EFLAGS)), + (XOR8rr GR8:$src1, GR8:$src2)>; +def : Pat<(parallel (X86xor_flag GR16:$src1, GR16:$src2), + (implicit EFLAGS)), + (XOR16rr GR16:$src1, GR16:$src2)>; +def : Pat<(parallel (X86xor_flag GR32:$src1, GR32:$src2), + (implicit EFLAGS)), + (XOR32rr GR32:$src1, GR32:$src2)>; + +// Register-Memory XOr with EFLAGS result +def : Pat<(parallel (X86xor_flag GR8:$src1, (loadi8 addr:$src2)), + (implicit EFLAGS)), + (XOR8rm GR8:$src1, addr:$src2)>; +def : Pat<(parallel (X86xor_flag GR16:$src1, (loadi16 addr:$src2)), + (implicit EFLAGS)), + (XOR16rm GR16:$src1, addr:$src2)>; +def : Pat<(parallel (X86xor_flag GR32:$src1, (loadi32 addr:$src2)), + (implicit EFLAGS)), + (XOR32rm GR32:$src1, addr:$src2)>; + +// Register-Integer XOr with EFLAGS result +def : Pat<(parallel (X86xor_flag GR8:$src1, imm:$src2), + (implicit EFLAGS)), + (XOR8ri GR8:$src1, imm:$src2)>; +def : Pat<(parallel (X86xor_flag GR16:$src1, imm:$src2), + (implicit EFLAGS)), + (XOR16ri GR16:$src1, imm:$src2)>; +def : Pat<(parallel (X86xor_flag GR32:$src1, imm:$src2), + (implicit EFLAGS)), + (XOR32ri GR32:$src1, imm:$src2)>; +def : Pat<(parallel (X86xor_flag GR16:$src1, i16immSExt8:$src2), + (implicit EFLAGS)), + (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>; +def : Pat<(parallel (X86xor_flag GR32:$src1, i32immSExt8:$src2), + (implicit EFLAGS)), + (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>; + +// Memory-Register XOr with EFLAGS result +def : Pat<(parallel (store (X86xor_flag (loadi8 addr:$dst), GR8:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR8mr addr:$dst, GR8:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi16 addr:$dst), GR16:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR16mr addr:$dst, GR16:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi32 addr:$dst), GR32:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR32mr addr:$dst, GR32:$src2)>; + +// Memory-Integer XOr with EFLAGS result +def : Pat<(parallel (store (X86xor_flag (loadi8 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR8mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi16 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR16mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi32 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR32mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi16 addr:$dst), i16immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR16mi8 addr:$dst, i16immSExt8:$src2)>; +def : Pat<(parallel (store (X86xor_flag (loadi32 addr:$dst), i32immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (XOR32mi8 addr:$dst, i32immSExt8:$src2)>; + +// Register-Register And with EFLAGS result +def : Pat<(parallel (X86and_flag GR8:$src1, GR8:$src2), + (implicit EFLAGS)), + (AND8rr GR8:$src1, 
GR8:$src2)>; +def : Pat<(parallel (X86and_flag GR16:$src1, GR16:$src2), + (implicit EFLAGS)), + (AND16rr GR16:$src1, GR16:$src2)>; +def : Pat<(parallel (X86and_flag GR32:$src1, GR32:$src2), + (implicit EFLAGS)), + (AND32rr GR32:$src1, GR32:$src2)>; + +// Register-Memory And with EFLAGS result +def : Pat<(parallel (X86and_flag GR8:$src1, (loadi8 addr:$src2)), + (implicit EFLAGS)), + (AND8rm GR8:$src1, addr:$src2)>; +def : Pat<(parallel (X86and_flag GR16:$src1, (loadi16 addr:$src2)), + (implicit EFLAGS)), + (AND16rm GR16:$src1, addr:$src2)>; +def : Pat<(parallel (X86and_flag GR32:$src1, (loadi32 addr:$src2)), + (implicit EFLAGS)), + (AND32rm GR32:$src1, addr:$src2)>; + +// Register-Integer And with EFLAGS result +def : Pat<(parallel (X86and_flag GR8:$src1, imm:$src2), + (implicit EFLAGS)), + (AND8ri GR8:$src1, imm:$src2)>; +def : Pat<(parallel (X86and_flag GR16:$src1, imm:$src2), + (implicit EFLAGS)), + (AND16ri GR16:$src1, imm:$src2)>; +def : Pat<(parallel (X86and_flag GR32:$src1, imm:$src2), + (implicit EFLAGS)), + (AND32ri GR32:$src1, imm:$src2)>; +def : Pat<(parallel (X86and_flag GR16:$src1, i16immSExt8:$src2), + (implicit EFLAGS)), + (AND16ri8 GR16:$src1, i16immSExt8:$src2)>; +def : Pat<(parallel (X86and_flag GR32:$src1, i32immSExt8:$src2), + (implicit EFLAGS)), + (AND32ri8 GR32:$src1, i32immSExt8:$src2)>; + +// Memory-Register And with EFLAGS result +def : Pat<(parallel (store (X86and_flag (loadi8 addr:$dst), GR8:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND8mr addr:$dst, GR8:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi16 addr:$dst), GR16:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND16mr addr:$dst, GR16:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi32 addr:$dst), GR32:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND32mr addr:$dst, GR32:$src2)>; + +// Memory-Integer And with EFLAGS result +def : Pat<(parallel (store (X86and_flag (loadi8 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND8mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi16 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND16mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi32 addr:$dst), imm:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND32mi addr:$dst, imm:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi16 addr:$dst), i16immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND16mi8 addr:$dst, i16immSExt8:$src2)>; +def : Pat<(parallel (store (X86and_flag (loadi32 addr:$dst), i32immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (AND32mi8 addr:$dst, i32immSExt8:$src2)>; + // -disable-16bit support. 
def : Pat<(truncstorei16 (i32 imm:$src), addr:$dst), (MOV16mi addr:$dst, imm:$src)>; diff --git a/test/CodeGen/X86/2008-10-16-SpillerBug.ll b/test/CodeGen/X86/2008-10-16-SpillerBug.ll index 7582f63fc8c..b8ca364d179 100644 --- a/test/CodeGen/X86/2008-10-16-SpillerBug.ll +++ b/test/CodeGen/X86/2008-10-16-SpillerBug.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mtriple=i386-apple-darwin | grep {andl.*7.*edx} +; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mtriple=i386-apple-darwin | grep {andl.*7.*edi} %struct.XXDActiveTextureTargets = type { i64, i64, i64, i64, i64, i64 } %struct.XXDAlphaTest = type { float, i16, i8, i8 } diff --git a/test/CodeGen/X86/peep-test-3.ll b/test/CodeGen/X86/peep-test-3.ll new file mode 100644 index 00000000000..a96351a3037 --- /dev/null +++ b/test/CodeGen/X86/peep-test-3.ll @@ -0,0 +1,88 @@ +; RUN: llc < %s -march=x86 | FileCheck %s + +; LLVM should omit the testl and use the flags result from the orl. + +; CHECK: or: +define void @or(float* %A, i32 %IA, i32 %N) nounwind { +entry: + %0 = ptrtoint float* %A to i32 ; [#uses=1] + %1 = and i32 %0, 3 ; [#uses=1] + %2 = xor i32 %IA, 1 ; [#uses=1] +; CHECK: orl %ecx, %edx +; CHECK-NEXT: je .LBB1_2 + %3 = or i32 %2, %1 ; [#uses=1] + %4 = icmp eq i32 %3, 0 ; [#uses=1] + br i1 %4, label %return, label %bb + +bb: ; preds = %entry + store float 0.000000e+00, float* %A, align 4 + ret void + +return: ; preds = %entry + ret void +} +; CHECK: xor: +define void @xor(float* %A, i32 %IA, i32 %N) nounwind { +entry: + %0 = ptrtoint float* %A to i32 ; [#uses=1] + %1 = and i32 %0, 3 ; [#uses=1] +; CHECK: xorl $1, %e +; CHECK-NEXT: je .LBB2_2 + %2 = xor i32 %IA, 1 ; [#uses=1] + %3 = xor i32 %2, %1 ; [#uses=1] + %4 = icmp eq i32 %3, 0 ; [#uses=1] + br i1 %4, label %return, label %bb + +bb: ; preds = %entry + store float 0.000000e+00, float* %A, align 4 + ret void + +return: ; preds = %entry + ret void +} +; CHECK: and: +define void @and(float* %A, i32 %IA, i32 %N, i8* %p) nounwind { +entry: + store i8 0, i8* %p + %0 = ptrtoint float* %A to i32 ; [#uses=1] + %1 = and i32 %0, 3 ; [#uses=1] + %2 = xor i32 %IA, 1 ; [#uses=1] +; CHECK: andl $3, % +; CHECK-NEXT: movb % +; CHECK-NEXT: je .LBB3_2 + %3 = and i32 %2, %1 ; [#uses=1] + %t = trunc i32 %3 to i8 + store i8 %t, i8* %p + %4 = icmp eq i32 %3, 0 ; [#uses=1] + br i1 %4, label %return, label %bb + +bb: ; preds = %entry + store float 0.000000e+00, float* null, align 4 + ret void + +return: ; preds = %entry + ret void +} + +; Just like @and, but without the trunc+store. This should use a testl +; instead of an andl. 
+; CHECK: test: +define void @test(float* %A, i32 %IA, i32 %N, i8* %p) nounwind { +entry: + store i8 0, i8* %p + %0 = ptrtoint float* %A to i32 ; [#uses=1] + %1 = and i32 %0, 3 ; [#uses=1] + %2 = xor i32 %IA, 1 ; [#uses=1] +; CHECK: testb $3, % +; CHECK-NEXT: je .LBB4_2 + %3 = and i32 %2, %1 ; [#uses=1] + %4 = icmp eq i32 %3, 0 ; [#uses=1] + br i1 %4, label %return, label %bb + +bb: ; preds = %entry + store float 0.000000e+00, float* null, align 4 + ret void + +return: ; preds = %entry + ret void +} diff --git a/test/CodeGen/X86/stack-color-with-reg.ll b/test/CodeGen/X86/stack-color-with-reg.ll index f085b3f8f38..a8ae8e8168e 100644 --- a/test/CodeGen/X86/stack-color-with-reg.ll +++ b/test/CodeGen/X86/stack-color-with-reg.ll @@ -1,7 +1,6 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -relocation-model=pic -disable-fp-elim -color-ss-with-regs -stats -info-output-file - > %t -; RUN: grep stackcoloring %t | grep "loads eliminated" -; RUN: grep stackcoloring %t | grep "stack slot refs replaced with reg refs" | grep 5 -; RUN: grep asm-printer %t | grep 181 +; RUN: grep stackcoloring %t | grep "stack slot refs replaced with reg refs" | grep 8 +; RUN: grep asm-printer %t | grep 182 type { [62 x %struct.Bitvec*] } ; type %0 type { i8* } ; type %1 -- 2.34.1
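
For quick reference, the new peep-test-3.ll test above captures what this change buys: when an or, xor, or and whose result feeds a compare against zero can set EFLAGS itself, the separate test instruction is dropped. A minimal before/after sketch in the style of that test follows; the exact registers and label numbers are illustrative, not taken from actual llc output:

  ; IR of the same shape as @or in peep-test-3.ll
  %3 = or i32 %2, %1
  %4 = icmp eq i32 %3, 0
  br i1 %4, label %return, label %bb

  ; codegen before this patch (illustrative):
  ;   orl   %ecx, %edx
  ;   testl %edx, %edx
  ;   je    .LBB1_2
  ;
  ; codegen with this patch, matching the test's CHECK lines:
  ;   orl   %ecx, %edx
  ;   je    .LBB1_2

The @test function above checks the converse case handled in EmitTest: when the and result has no non-flag uses, X86ISD::AND is not formed and a plain test instruction is still preferred.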