// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
default: break;
- case ARM::LDRrs: case ARM::ATOMIC_LDRrs:
- case ARM::LDRBrs: case ARM::ATOMIC_LDRBrs: {
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
unsigned ShOpVal = DefMI->getOperand(3).getImm();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
    (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
  --Adjust;
break;
}
- case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
- case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
- case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
unsigned ShAmt = DefMI->getOperand(3).getImm();
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID.getOpcode()) {
default: break;
- case ARM::LDRrs: case ARM::ATOMIC_LDRrs:
- case ARM::LDRBrs: case ARM::ATOMIC_LDRBrs: {
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
unsigned ShOpVal =
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
    (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
  --Latency;
break;
}
- case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
- case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
- case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
unsigned ShAmt =
  cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
// return false for everything else.
unsigned Opc = MI->getOpcode();
switch (Opc) {
- case ARM::LDRi12: case ARM::ATOMIC_LDRi12:
- case ARM::LDRH: case ARM::ATOMIC_LDRH:
- case ARM::LDRBi12: case ARM::ATOMIC_LDRBi12:
+ case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
- case ARM::t2LDRi12: case ARM::ATOMIC_t2LDRi12:
- case ARM::t2LDRi8: case ARM::ATOMIC_t2LDRi8:
- case ARM::t2STRi12:
- case ARM::t2STRi8:
+ case ARM::t2LDRi12: case ARM::t2LDRi8:
+ case ARM::t2STRi12: case ARM::t2STRi8:
case ARM::VLDRS: case ARM::VLDRD:
case ARM::VSTRS: case ARM::VSTRD:
case ARM::tSTRspi: case ARM::tLDRspi:
}
}
-// Pseudo-instructions for atomic loads.
-// These are marked with mayStore so they can't be reordered.
-let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
-def ATOMIC_LDRBrs : ARMPseudoExpand<(outs GPRnopc:$Rt),
- (ins ldst_so_reg:$shift, pred:$p),
- 4, IIC_iLoad_bh_r, [],
- (LDRBrs GPRnopc:$Rt, ldst_so_reg:$shift, pred:$p)>;
-def ATOMIC_LDRBi12 : ARMPseudoExpand<(outs GPRnopc:$Rt),
- (ins addrmode_imm12:$addr, pred:$p),
- 4, IIC_iLoad_bh_si, [],
- (LDRBi12 GPRnopc:$Rt, addrmode_imm12:$addr, pred:$p)> {
- let AM = AddrMode_i12;
-}
-def ATOMIC_LDRH : ARMPseudoExpand<(outs GPR:$Rt),
- (ins addrmode3:$addr, pred:$p),
- 4, IIC_iLoad_bh_r, [],
- (LDRH GPR:$Rt, addrmode3:$addr, pred:$p)> {
- let AM = AddrMode3;
-}
-def ATOMIC_LDRi12 : ARMPseudoExpand<(outs GPR:$Rt),
- (ins addrmode_imm12:$addr, pred:$p),
- 4, IIC_iLoad_si, [],
- (LDRi12 GPR:$Rt, addrmode_imm12:$addr, pred:$p)> {
- let AM = AddrMode_i12;
-}
-def ATOMIC_LDRrs : ARMPseudoExpand<(outs GPR:$Rt),
- (ins ldst_so_reg:$shift, pred:$p),
- 4, IIC_iLoad_r, [],
- (LDRrs GPR:$Rt, ldst_so_reg:$shift, pred:$p)>;
-}
-
let usesCustomInserter = 1 in {
def COPY_STRUCT_BYVAL_I32 : PseudoInst<
(outs), (ins GPR:$dst, GPR:$src, i32imm:$size, i32imm:$alignment),
// Atomic load/store patterns
def : ARMPat<(atomic_load_8 ldst_so_reg:$src),
- (ATOMIC_LDRBrs ldst_so_reg:$src)>;
+ (LDRBrs ldst_so_reg:$src)>;
def : ARMPat<(atomic_load_8 addrmode_imm12:$src),
- (ATOMIC_LDRBi12 addrmode_imm12:$src)>;
+ (LDRBi12 addrmode_imm12:$src)>;
def : ARMPat<(atomic_load_16 addrmode3:$src),
- (ATOMIC_LDRH addrmode3:$src)>;
+ (LDRH addrmode3:$src)>;
def : ARMPat<(atomic_load_32 ldst_so_reg:$src),
- (ATOMIC_LDRrs ldst_so_reg:$src)>;
+ (LDRrs ldst_so_reg:$src)>;
def : ARMPat<(atomic_load_32 addrmode_imm12:$src),
- (ATOMIC_LDRi12 addrmode_imm12:$src)>;
+ (LDRi12 addrmode_imm12:$src)>;
def : ARMPat<(atomic_store_8 ldst_so_reg:$ptr, GPR:$val),
(STRBrs GPR:$val, ldst_so_reg:$ptr)>;
def : ARMPat<(atomic_store_8 addrmode_imm12:$ptr, GPR:$val),
let Inst{7-0} = addr;
}
-// Atomic loads. These pseudos expand to the loads above, but they have
-// mayStore = 1 so they can't be reordered.
-let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
-let AM = AddrModeT1_1 in {
-def ATOMIC_tLDRBi : tPseudoExpand<(outs tGPR:$Rt),
- (ins t_addrmode_is1:$addr, pred:$p),
- 2, IIC_iLoad_bh_i, [],
- (tLDRBi tGPR:$Rt, t_addrmode_is1:$addr, pred:$p)>;
-def ATOMIC_tLDRBr : tPseudoExpand<(outs tGPR:$Rt),
- (ins t_addrmode_rrs1:$addr, pred:$p),
- 2, IIC_iLoad_bh_r, [],
- (tLDRBr tGPR:$Rt, t_addrmode_rrs1:$addr, pred:$p)>;
-}
-let AM = AddrModeT1_2 in {
-def ATOMIC_tLDRHi : tPseudoExpand<(outs tGPR:$Rt),
- (ins t_addrmode_is2:$addr, pred:$p),
- 2, IIC_iLoad_bh_i, [],
- (tLDRHi tGPR:$Rt, t_addrmode_is2:$addr, pred:$p)>;
-def ATOMIC_tLDRHr : tPseudoExpand<(outs tGPR:$Rt),
- (ins t_addrmode_rrs2:$addr, pred:$p),
- 2, IIC_iLoad_bh_r, [],
- (tLDRHr tGPR:$Rt, t_addrmode_rrs2:$addr, pred:$p)>;
-}
-let AM = AddrModeT1_4 in {
-def ATOMIC_tLDRi : tPseudoExpand<(outs tGPR:$Rt),
- (ins t_addrmode_is4:$addr, pred:$p),
- 2, IIC_iLoad_i, [],
- (tLDRi tGPR:$Rt, t_addrmode_is4:$addr, pred:$p)>;
-def ATOMIC_tLDRr : tPseudoExpand<(outs tGPR:$Rt),
- (ins t_addrmode_rrs4:$addr, pred:$p),
- 2, IIC_iLoad_r, [],
- (tLDRr tGPR:$Rt, t_addrmode_rrs4:$addr, pred:$p)>;
-}
-}
-
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
(tASRri (tLSLri (tLDRHi t_addrmode_is2:$addr), 16), 16)>;
def : T1Pat<(atomic_load_8 t_addrmode_is1:$src),
- (ATOMIC_tLDRBi t_addrmode_is1:$src)>;
+ (tLDRBi t_addrmode_is1:$src)>;
def : T1Pat<(atomic_load_8 t_addrmode_rrs1:$src),
- (ATOMIC_tLDRBr t_addrmode_rrs1:$src)>;
+ (tLDRBr t_addrmode_rrs1:$src)>;
def : T1Pat<(atomic_load_16 t_addrmode_is2:$src),
- (ATOMIC_tLDRHi t_addrmode_is2:$src)>;
+ (tLDRHi t_addrmode_is2:$src)>;
def : T1Pat<(atomic_load_16 t_addrmode_rrs2:$src),
- (ATOMIC_tLDRHr t_addrmode_rrs2:$src)>;
+ (tLDRHr t_addrmode_rrs2:$src)>;
def : T1Pat<(atomic_load_32 t_addrmode_is4:$src),
- (ATOMIC_tLDRi t_addrmode_is4:$src)>;
+ (tLDRi t_addrmode_is4:$src)>;
def : T1Pat<(atomic_load_32 t_addrmode_rrs4:$src),
- (ATOMIC_tLDRr t_addrmode_rrs4:$src)>;
+ (tLDRr t_addrmode_rrs4:$src)>;
def : T1Pat<(atomic_store_8 t_addrmode_is1:$ptr, tGPR:$val),
(tSTRBi tGPR:$val, t_addrmode_is1:$ptr)>;
def : T1Pat<(atomic_store_8 t_addrmode_rrs1:$ptr, tGPR:$val),
defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>;
defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>;
-// Pseudos for atomic loads. Setting mayStore prevents reordering.
-let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
-def ATOMIC_t2LDRBi12 : t2PseudoExpand<(outs rGPR:$Rt),
- (ins t2addrmode_imm12:$addr, pred:$p),
- 4, IIC_iLoad_bh_i, [],
- (t2LDRBi12 rGPR:$Rt, t2addrmode_imm12:$addr, pred:$p)>;
-def ATOMIC_t2LDRBi8 : t2PseudoExpand<(outs rGPR:$Rt),
- (ins t2addrmode_negimm8:$addr, pred:$p),
- 4, IIC_iLoad_bh_i, [],
- (t2LDRBi8 rGPR:$Rt, t2addrmode_negimm8:$addr, pred:$p)>;
-def ATOMIC_t2LDRBs : t2PseudoExpand<(outs rGPR:$Rt),
- (ins t2addrmode_so_reg:$addr, pred:$p),
- 4, IIC_iLoad_bh_si, [],
- (t2LDRBs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
-def ATOMIC_t2LDRHi12 : t2PseudoExpand<(outs rGPR:$Rt),
- (ins t2addrmode_imm12:$addr, pred:$p),
- 4, IIC_iLoad_bh_i, [],
- (t2LDRHi12 rGPR:$Rt, t2addrmode_imm12:$addr, pred:$p)>;
-def ATOMIC_t2LDRHi8 : t2PseudoExpand<(outs rGPR:$Rt),
- (ins t2addrmode_negimm8:$addr, pred:$p),
- 4, IIC_iLoad_bh_i, [],
- (t2LDRHi8 rGPR:$Rt, t2addrmode_negimm8:$addr, pred:$p)>;
-def ATOMIC_t2LDRHs : t2PseudoExpand<(outs rGPR:$Rt),
- (ins t2addrmode_so_reg:$addr, pred:$p),
- 4, IIC_iLoad_bh_si, [],
- (t2LDRHs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
-def ATOMIC_t2LDRi12 : t2PseudoExpand<(outs GPR:$Rt),
- (ins t2addrmode_imm12:$addr, pred:$p),
- 4, IIC_iLoad_i, [],
- (t2LDRi12 GPR:$Rt, t2addrmode_imm12:$addr, pred:$p)>;
-def ATOMIC_t2LDRi8 : t2PseudoExpand<(outs GPR:$Rt),
- (ins t2addrmode_negimm8:$addr, pred:$p),
- 4, IIC_iLoad_i, [],
- (t2LDRi8 GPR:$Rt, t2addrmode_negimm8:$addr, pred:$p)>;
-def ATOMIC_t2LDRs : t2PseudoExpand<(outs GPR:$Rt),
- (ins t2addrmode_so_reg:$addr, pred:$p),
- 4, IIC_iLoad_si, [],
- (t2LDRs GPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
-}
-
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
Requires<[HasT2ExtractPack, IsThumb2]>;
// Atomic load/store patterns
-def : T2Pat<(atomic_load_8 t2addrmode_imm12:$addr),
- (ATOMIC_t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_8 t2addrmode_negimm8:$addr),
- (ATOMIC_t2LDRBi8 t2addrmode_negimm8:$addr)>;
+def : T2Pat<(atomic_load_8 t2addrmode_imm12:$addr),
+ (t2LDRBi12 t2addrmode_imm12:$addr)>;
+def : T2Pat<(atomic_load_8 t2addrmode_negimm8:$addr),
+ (t2LDRBi8 t2addrmode_negimm8:$addr)>;
def : T2Pat<(atomic_load_8 t2addrmode_so_reg:$addr),
- (ATOMIC_t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_imm12:$addr),
- (ATOMIC_t2LDRHi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_negimm8:$addr),
- (ATOMIC_t2LDRHi8 t2addrmode_negimm8:$addr)>;
+ (t2LDRBs t2addrmode_so_reg:$addr)>;
+def : T2Pat<(atomic_load_16 t2addrmode_imm12:$addr),
+ (t2LDRHi12 t2addrmode_imm12:$addr)>;
+def : T2Pat<(atomic_load_16 t2addrmode_negimm8:$addr),
+ (t2LDRHi8 t2addrmode_negimm8:$addr)>;
def : T2Pat<(atomic_load_16 t2addrmode_so_reg:$addr),
- (ATOMIC_t2LDRHs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(atomic_load_32 t2addrmode_imm12:$addr),
- (ATOMIC_t2LDRi12 t2addrmode_imm12:$addr)>;
+ (t2LDRHs t2addrmode_so_reg:$addr)>;
+def : T2Pat<(atomic_load_32 t2addrmode_imm12:$addr),
+ (t2LDRi12 t2addrmode_imm12:$addr)>;
def : T2Pat<(atomic_load_32 t2addrmode_negimm8:$addr),
- (ATOMIC_t2LDRi8 t2addrmode_negimm8:$addr)>;
+ (t2LDRi8 t2addrmode_negimm8:$addr)>;
def : T2Pat<(atomic_load_32 t2addrmode_so_reg:$addr),
- (ATOMIC_t2LDRs t2addrmode_so_reg:$addr)>;
+ (t2LDRs t2addrmode_so_reg:$addr)>;
def : T2Pat<(atomic_store_8 t2addrmode_imm12:$addr, GPR:$val),
(t2STRBi12 GPR:$val, t2addrmode_imm12:$addr)>;
def : T2Pat<(atomic_store_8 t2addrmode_negimm8:$addr, GPR:$val),
{ ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1 },
{ ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1 },
{ ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1 },
-
- // At this point it is safe to translate acquire loads to normal loads.
- // There is no risk of reordering loads.
- { ARM::ATOMIC_t2LDRi12,
- ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1 },
- { ARM::ATOMIC_t2LDRs,
- ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1 },
- { ARM::ATOMIC_t2LDRBi12,
- ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1 },
- { ARM::ATOMIC_t2LDRBs,
- ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1 },
- { ARM::ATOMIC_t2LDRHi12,
- ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1 },
- { ARM::ATOMIC_t2LDRHs,
- ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1 },
-
{ ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1 },
{ ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1 },
{ ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1 },
switch (Entry.WideOpc) {
default:
llvm_unreachable("Unexpected Thumb2 load / store opcode!");
- case ARM::t2LDRi12: case ARM::ATOMIC_t2LDRi12:
+ case ARM::t2LDRi12:
case ARM::t2STRi12:
if (MI->getOperand(1).getReg() == ARM::SP) {
  Opc = Entry.NarrowOpc2;
  ImmLimit = Entry.Imm2Limit;
}
HasImmOffset = true;
HasOffReg = false;
break;
- case ARM::t2LDRBi12: case ARM::ATOMIC_t2LDRBi12:
+ case ARM::t2LDRBi12:
case ARM::t2STRBi12:
HasImmOffset = true;
HasOffReg = false;
break;
case ARM::t2LDRHi12:
case ARM::t2STRHi12:
HasImmOffset = true;
HasOffReg = false;
break;
- case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
- case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
- case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
case ARM::t2LDRSBs:
case ARM::t2LDRSHs:
case ARM::t2STRs: