//===---------------------------------------------------------------------===//
-For this:
-
-extern int dst[];
-extern int* ptr;
-
-void test(void) {
- ptr = dst;
-}
-
-We generate this code for static relocation model:
-
-_test:
- leaq _dst(%rip), %rax
- movq %rax, _ptr(%rip)
- ret
-
-If we are in small code model, then we can treat _dst as a 32-bit constant.
- movq $_dst, _ptr(%rip)
-
-Note, however, we should continue to use RIP relative addressing mode as much as
-possible. The above is actually one byte shorter than
- movq $_dst, _ptr
-
-A better example is the code from PR1018. We are generating:
- leaq xcalloc2(%rip), %rax
- movq %rax, 8(%rsp)
-when we should be generating:
- movq $xcalloc2, 8(%rsp)
-
-The reason the better codegen isn't done now is support for static small
-code model in JIT mode. The JIT cannot ensure that all GV's are placed in the
-lower 4G so we are not treating GV labels as 32-bit values.
-
-//===---------------------------------------------------------------------===//
-
Right now the asm printer assumes GlobalAddress are accessed via RIP relative
addressing. Therefore, it is not possible to generate this:
movabsq $__ZTV10polynomialIdE+16, %rax
}
void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier) {
+ const char *Modifier, bool NotRIPRel) {
const MachineOperand &MO = MI->getOperand(OpNo);
const MRegisterInfo &RI = *TM.getRegisterInfo();
switch (MO.getType()) {
if (X86PICStyle == PICStyle::Stub &&
TM.getRelocationModel() == Reloc::PIC_)
O << "-\"L" << getFunctionNumber() << "$pb\"";
- if (isMemOp && Subtarget->is64Bit())
+ if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
O << "(%rip)";
return;
}
else if (Offset < 0)
O << Offset;
- if (isMemOp && Subtarget->is64Bit())
+ if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
O << "(%rip)";
return;
}
if (isMemOp && Subtarget->is64Bit()) {
if (isExt && TM.getRelocationModel() != Reloc::Static)
- O << "@GOTPCREL";
- O << "(%rip)";
+ O << "@GOTPCREL(%rip)";
+ else if (!NotRIPRel)
+ // Use rip when possible to reduce code size, except when index or
+ // base register are also part of the address. e.g.
+ // foo(%rip)(%rcx,%rax,4) is not legal
+ O << "(%rip)";
}
return;
return;
}
+ bool NotRIPRel = IndexReg.getReg() || BaseReg.getReg();
if (DispSpec.isGlobalAddress() ||
DispSpec.isConstantPoolIndex() ||
DispSpec.isJumpTableIndex()) {
- printOperand(MI, Op+3, "mem");
+ printOperand(MI, Op+3, "mem", NotRIPRel);
} else {
int DispVal = DispSpec.getImmedValue();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
// These methods are used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier = 0);
+ const char *Modifier = 0, bool NotRIPRel = false);
void printi8mem(const MachineInstr *MI, unsigned OpNo) {
printMemReference(MI, OpNo);
}
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
if (is64Bit && TM.getCodeModel() != CodeModel::Small)
break;
-
+ if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
+ break;
// If value is available in a register both base and index components have
// been picked, we can't fit the result available in the register in the
// addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
- // For X86-64 PIC code, only allow GV / CP + displacement so we can use
- // RIP relative addressing mode.
- if (is64Bit &&
- (AM.Base.Reg.Val || AM.Scale > 1 || AM.IndexReg.Val ||
- AM.BaseType == X86ISelAddressMode::FrameIndexBase))
- break;
- if (ConstantPoolSDNode *CP =
- dyn_cast<ConstantPoolSDNode>(N.getOperand(0))) {
- if (AM.CP == 0) {
+ bool isStatic = TM.getRelocationModel() == Reloc::Static;
+ SDOperand N0 = N.getOperand(0);
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+ GlobalValue *GV = G->getGlobal();
+ bool isAbs32 = !is64Bit ||
+ (isStatic && !(GV->isExternal() || GV->hasWeakLinkage() ||
+ GV->hasLinkOnceLinkage()));
+ if (isAbs32 || isRoot) {
+ AM.GV = G->getGlobal();
+ AM.Disp += G->getOffset();
+ AM.isRIPRel = !isAbs32;
+ return false;
+ }
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+ if (!is64Bit || isStatic || isRoot) {
AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment();
AM.Disp += CP->getOffset();
- AM.isRIPRel = is64Bit;
- return false;
- }
- } else if (GlobalAddressSDNode *G =
- dyn_cast<GlobalAddressSDNode>(N.getOperand(0))) {
- if (AM.GV == 0) {
- AM.GV = G->getGlobal();
- AM.Disp += G->getOffset();
- AM.isRIPRel = is64Bit;
+ AM.isRIPRel = !isStatic;
return false;
}
- } else if (isRoot && is64Bit) {
- if (ExternalSymbolSDNode *S =
- dyn_cast<ExternalSymbolSDNode>(N.getOperand(0))) {
+ } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
+ if (isStatic || isRoot) {
AM.ES = S->getSymbol();
- AM.isRIPRel = true;
+ AM.isRIPRel = !isStatic;
return false;
- } else if (JumpTableSDNode *J =
- dyn_cast<JumpTableSDNode>(N.getOperand(0))) {
+ }
+ } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
+ if (isStatic || isRoot) {
AM.JT = J->getIndex();
- AM.isRIPRel = true;
+ AM.isRIPRel = !isStatic;
return false;
}
}
if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
// For X86-64, we should always use lea to materialize RIP relative
// addresses.
- if (Subtarget->is64Bit())
+ if (Subtarget->is64Bit() && TM.getRelocationModel() != Reloc::Static)
Complexity = 4;
else
Complexity += 2;
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
-def HasMMX : Predicate<"Subtarget->hasMMX()">;
-def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
-def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
-def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
-def FPStack : Predicate<"!Subtarget->hasSSE2()">;
-def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
-def In64BitMode : Predicate<"Subtarget->is64Bit()">;
-def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
-def NotSmallCode :Predicate<"TM.getCodeModel() != CodeModel::Small">;
+def HasMMX : Predicate<"Subtarget->hasMMX()">;
+def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
+def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
+def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
+def FPStack : Predicate<"!Subtarget->hasSSE2()">;
+def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
+def In64BitMode : Predicate<"Subtarget->is64Bit()">;
+def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
+def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
+def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
//===----------------------------------------------------------------------===//
// X86 specific pattern fragments.
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
(MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
-/*
+def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tconstpool:$src)>,
+ Requires<[SmallCode, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tjumptable:$src)>,
+ Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, tglobaladdr:$src)>, Requires<[SmallCode]>;
+ (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
+ Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, texternalsym:$src)>, Requires<[SmallCode]>;
-*/
+ (MOV64mi32 addr:$dst, texternalsym:$src)>,
+ Requires<[SmallCode, IsStatic]>;
// Calls
// Direct PC relative function call for small code model. 32-bit displacement
MachineCodeEmitter &MCE) {
// FIXME: Move this to TargetJITInfo!
setRelocationModel(Reloc::Static);
+ // JIT cannot ensure globals are placed in the lower 4G of address.
+ if (Subtarget.is64Bit())
+ setCodeModel(CodeModel::Large);
PM.add(createX86CodeEmitterPass(*this, MCE));
return false;