From 172862a2a74f5deace29e3baaf8a77f6bd2cecbe Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Mon, 19 Oct 2009 19:51:42 +0000
Subject: [PATCH] remove strings from instructions that are never asm-printed.

All of these "subreg32" modifier instructions are handled explicitly by the
MCInst lowering phase. If they got to the asmprinter, they would explode.
They should eventually be replaced with correct use of subregs. A conceptual
sketch of this lowering step follows the patch.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@84526 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../X86/AsmPrinter/X86ATTInstPrinter.cpp      |  4 +---
 lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h |  3 +--
 lib/Target/X86/X86Instr64bit.td               | 21 +++++++------------
 lib/Target/X86/X86InstrInfo.td                | 17 ++++++---------
 4 files changed, 15 insertions(+), 30 deletions(-)

diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
index bc70ffe8d63..8ec5b62192e 100644
--- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
@@ -57,9 +57,7 @@ void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo) {
   }
 }
 
-void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
-                                     const char *Modifier) {
-  assert(Modifier == 0 && "Modifiers should not be used");
+void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo) {
 
   const MCOperand &Op = MI->getOperand(OpNo);
   if (Op.isReg()) {
diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
index 5f28fa46f5f..3180618d126 100644
--- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
+++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
@@ -32,8 +32,7 @@ public:
 
   static const char *getRegisterName(unsigned RegNo);
 
-  void printOperand(const MCInst *MI, unsigned OpNo,
-                    const char *Modifier = 0);
+  void printOperand(const MCInst *MI, unsigned OpNo);
   void printMemReference(const MCInst *MI, unsigned Op);
   void printLeaMemReference(const MCInst *MI, unsigned Op);
   void printSSECC(const MCInst *MI, unsigned Op);
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index f7d5ef275fe..c1b7b8f827e 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -368,19 +368,15 @@ def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
 // Use movzbl instead of movzbq when the destination is a register; it's
 // equivalent due to implicit zero-extending, and it has a smaller encoding.
 def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
-                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR64:$dst, (zext GR8:$src))]>, TB;
+                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
 def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
-                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
+                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
 // Use movzwl instead of movzwq when the destination is a register; it's
 // equivalent due to implicit zero-extending, and it has a smaller encoding.
 def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
-                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR64:$dst, (zext GR16:$src))]>, TB;
+                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
 def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
-                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
+                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
 
 // There's no movzlq instruction, but movl can be used for this purpose, using
 // implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
@@ -390,11 +386,9 @@ def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
 // necessarily all zero. In such cases, we fall back to these explicit zext
 // instructions.
 def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
-                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                    [(set GR64:$dst, (zext GR32:$src))]>;
+                    "", [(set GR64:$dst, (zext GR32:$src))]>;
 def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
-                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                    [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
+                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
 
 // Any instruction that defines a 32-bit result leaves the high half of the
 // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
@@ -1455,8 +1449,7 @@ def : Pat<(i64 0),
 // Materialize i64 constant where top 32-bits are zero.
 let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
-                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                        [(set GR64:$dst, i64immZExt32:$src)]>;
+                        "", [(set GR64:$dst, i64immZExt32:$src)]>;
 
 //===----------------------------------------------------------------------===//
 // Thread Local Storage Instructions
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 30b57d85d01..9d474d3b7d1 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -3392,11 +3392,9 @@ def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
 // of the register here. This has a smaller encoding and avoids a
 // partial-register update.
 def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
-                   "movs{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR16:$dst, (sext GR8:$src))]>, TB;
+                   "", [(set GR16:$dst, (sext GR8:$src))]>, TB;
 def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
-                   "movs{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
+                   "", [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
 def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                    "movs{bl|x}\t{$src, $dst|$dst, $src}",
                    [(set GR32:$dst, (sext GR8:$src))]>, TB;
@@ -3414,11 +3412,9 @@ def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
 // of the register here. This has a smaller encoding and avoids a
 // partial-register update.
 def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
-                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR16:$dst, (zext GR8:$src))]>, TB;
+                   "", [(set GR16:$dst, (zext GR8:$src))]>, TB;
 def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
-                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
-                   [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
+                   "", [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
 def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                    "movz{bl|x}\t{$src, $dst|$dst, $src}",
                    [(set GR32:$dst, (zext GR8:$src))]>, TB;
@@ -3474,9 +3470,8 @@ def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
                  [(set GR8:$dst, 0)]>;
 // Use xorl instead of xorw since we don't care about the high 16 bits,
 // it's smaller, and it avoids a partial-register update.
-def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
-                "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
-                [(set GR16:$dst, 0)]>;
+def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
+                "", [(set GR16:$dst, 0)]>;
 def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins),
                 "xor{l}\t$dst, $dst",
                 [(set GR32:$dst, 0)]>;
-- 
2.34.1
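
What "handled explicitly by the MCInst lowering phase" means in practice: before printing, these pseudo-like forms are rewritten into ordinary 32-bit instructions whose destination is the 32-bit subregister, relying on x86-64's implicit zero-extension of 32-bit writes; that is why their asm strings can be empty. The following is a minimal standalone C++ sketch of that idea, not the actual X86 MCInst lowering code (which is not part of this patch); the SimpleInst type, register enum, and helper names are invented for illustration.

```cpp
// Standalone sketch (not LLVM code): models how an MC lowering step might
// rewrite a "subreg32" form such as MOVZX64rr32 before it reaches the printer.
#include <cassert>
#include <cstdio>

enum Reg { RAX, RCX, EAX, ECX, NoReg };     // stand-in registers only
enum Op  { MOVZX64rr32, MOV32rr };          // stand-in opcodes only

struct SimpleInst {        // stand-in for an MCInst: opcode plus two register operands
  Op  Opcode;
  Reg Dst, Src;
};

// Map a 64-bit GPR to its 32-bit subregister (only two registers modeled here).
static Reg getSubReg32(Reg R) {
  switch (R) {
  case RAX: return EAX;
  case RCX: return ECX;
  default:  assert(0 && "unmodeled register"); return NoReg;
  }
}

// The lowering step: MOVZX64rr32 has no asm string of its own, so it is
// rewritten into a plain 32-bit register move on the 32-bit subregister;
// the hardware zero-extends the upper 32 bits for free.
static void lowerSubReg32(SimpleInst &MI) {
  if (MI.Opcode == MOVZX64rr32) {
    MI.Opcode = MOV32rr;
    MI.Dst = getSubReg32(MI.Dst);
  }
}

int main() {
  SimpleInst MI = {MOVZX64rr32, RAX, ECX};  // zero-extend a 32-bit value into RAX
  lowerSubReg32(MI);
  // After lowering, this would be printed as an ordinary "movl %ecx, %eax".
  std::printf("opcode=%d dst=%d src=%d\n", MI.Opcode, MI.Dst, MI.Src);
  return 0;
}
```

The same rewrite pattern covers the other empty-string definitions above (MOVZX16rr8, MOV16r0, MOV64ri64i32, and so on); the commit message notes that proper subregister support should eventually make this explicit opcode rewriting unnecessary.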