//===----------------------------------------------------------------------===//
// Call-sequence marker pseudo-instructions.  R1 (the SPU stack pointer) is
// both defined and used; the frame-adjustment amount is a 16-bit unsigned
// immediate matched by the callseq_start/callseq_end patterns.
// NOTE(review): the operand type changes u16imm -> u16imm_i32 — presumably
// to pin the immediate's value type to i32; confirm against the operand defs.
let hasCtrlDep = 1, Defs = [R1], Uses = [R1] in {
- def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
+ def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm_i32:$amt),
"${:comment} ADJCALLSTACKDOWN",
[(callseq_start imm:$amt)]>;
- def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt),
+ def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm_i32:$amt),
"${:comment} ADJCALLSTACKUP",
[(callseq_end imm:$amt)]>;
}
: RI16Form<0b0010010, (outs), (ins VECREG:$rT, addr256k:$src),
"stqa\t$rT, $src",
LoadStore,
- [(store (vectype VECREG:$rT), aform_addr:$src)]>
-{ }
+ [(store (vectype VECREG:$rT), aform_addr:$src)]>;
// StoreAForm: "stqa" — store quadword, absolute (a-form) addressing.
// Stores a scalar register class to an 18-bit absolute address (addr256k);
// the pattern matches through the aform_addr address leaf.
// The empty "{ }" body is replaced by a terminating ';' (equivalent record).
class StoreAForm<RegisterClass rclass>
: RI16Form<0b001001, (outs), (ins rclass:$rT, addr256k:$src),
"stqa\t$rT, $src",
LoadStore,
- [(store rclass:$rT, aform_addr:$src)]>
-{ }
+ [(store rclass:$rT, aform_addr:$src)]>;
multiclass StoreAForms
{
"cwd\t$rT, $src", ShuffleOp,
[(set (v4i32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
// CWDf32: f32 variant of "cwd" (generate controls for word insertion,
// d-form address) so v4f32 element inserts also match SPUvecinsmask.
+def CWDf32 : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
+ "cwd\t$rT, $src", ShuffleOp,
+ [(set (v4f32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+
// CWX / CWXf32: generate controls for word insertion, x-form (register +
// register) address.  The f32 variant mirrors CWX so v4f32 inserts match.
def CWX : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
"cwx\t$rT, $src", ShuffleOp,
[(set (v4i32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+def CWXf32 : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
+ "cwx\t$rT, $src", ShuffleOp,
+ [(set (v4f32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+
// CDD / CDDf64: generate controls for doubleword insertion, d-form address.
// The f64 variant mirrors CDD so v2f64 element inserts also match.
def CDD : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
"cdd\t$rT, $src", ShuffleOp,
[(set (v2i64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+def CDDf64 : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
+ "cdd\t$rT, $src", ShuffleOp,
+ [(set (v2f64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+
// CDX / CDXf64: generate controls for doubleword insertion, x-form
// (register + register) address; f64 variant for v2f64 inserts.
def CDX : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
"cdx\t$rT, $src", ShuffleOp,
[(set (v2i64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+def CDXf64 : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
+ "cdx\t$rT, $src", ShuffleOp,
+ [(set (v2f64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+
//===----------------------------------------------------------------------===//
// Constant formation:
//===----------------------------------------------------------------------===//
[(set R8C:$rT, immSExt8:$val)]>;
// IL does sign extension!
-def ILr64:
- RI16Form<0b100000010, (outs R64C:$rT), (ins s16imm_i64:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R64C:$rT, immSExt16:$val)]>;
-
-def ILv2i64:
- RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm_i64:$val),
- "il\t$rT, $val", ImmLoad,
- [(set VECREG:$rT, (v2i64 v2i64SExt16Imm:$val))]>;
-
-def ILv4i32:
- RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm:$val),
- "il\t$rT, $val", ImmLoad,
- [(set VECREG:$rT, (v4i32 v4i32SExt16Imm:$val))]>;
-
-def ILr32:
- RI16Form<0b100000010, (outs R32C:$rT), (ins s16imm_i32:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R32C:$rT, immSExt16:$val)]>;
-
-def ILf32:
- RI16Form<0b100000010, (outs R32FP:$rT), (ins s16imm_f32:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R32FP:$rT, fpimmSExt16:$val)]>;
-
-def ILf64:
- RI16Form<0b100000010, (outs R64FP:$rT), (ins s16imm_f64:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R64FP:$rT, fpimmSExt16:$val)]>;
-
-def ILHUv4i32:
- RI16Form<0b010000010, (outs VECREG:$rT), (ins u16imm:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set VECREG:$rT, (v4i32 immILHUvec:$val))]>;
-
-def ILHUr32:
- RI16Form<0b010000010, (outs R32C:$rT), (ins u16imm:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set R32C:$rT, hi16:$val)]>;
-
-// ILHUf32: Used to custom lower float constant loads
-def ILHUf32:
- RI16Form<0b010000010, (outs R32FP:$rT), (ins f16imm:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set R32FP:$rT, hi16_f32:$val)]>;
-
-// ILHUhi: Used for loading high portion of an address. Note the symbolHi
-// printer used for the operand.
-def ILHUhi:
- RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set R32C:$rT, hi16:$val)]>;
+
// IL: immediate load, 16-bit sign-extended immediate ("il" mnemonic).
// The per-type defs (ILr64, ILv4i32, ...) are refactored into one base
// class plus vector/register helper classes and an ImmediateLoad
// multiclass, replacing the hand-written defs removed above.
+class ILInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI16Form<0b100000010, OOL, IOL, "il\t$rT, $val",
+ ImmLoad, pattern>;
+
// Vector form: the immediate is splatted via the type-specific PatLeaf.
+class ILVecInst<ValueType vectype, Operand immtype, PatLeaf xform>:
+ ILInst<(outs VECREG:$rT), (ins immtype:$val),
+ [(set (vectype VECREG:$rT), (vectype xform:$val))]>;
+
// Scalar form: loads the sign-extended immediate into a register class.
+class ILRegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
+ ILInst<(outs rclass:$rT), (ins immtype:$val),
+ [(set rclass:$rT, xform:$val)]>;
+
+multiclass ImmediateLoad
+{
+ def v2i64: ILVecInst<v2i64, s16imm_i64, v2i64SExt16Imm>;
+ def v4i32: ILVecInst<v4i32, s16imm_i32, v4i32SExt16Imm>;
+
+ // TODO: Need v2f64, v4f32
+
+ def r64: ILRegInst<R64C, s16imm_i64, immSExt16>;
+ def r32: ILRegInst<R32C, s16imm_i32, immSExt16>;
+ def f32: ILRegInst<R32FP, s16imm_f32, fpimmSExt16>;
+ def f64: ILRegInst<R64FP, s16imm_f64, fpimmSExt16>;
+}
+
// Produces ILv2i64, ILv4i32, ILr64, ILr32, ILf32, ILf64 — the same names
// as the removed hand-written defs (minus ILv8i16, which did not exist).
+defm IL : ImmediateLoad;
+
// ILHU: immediate load halfword upper ("ilhu") — places a 16-bit immediate
// in the upper halfword of each word slot.  Refactored into a base class,
// vector/register helpers, and a multiclass replacing the removed defs.
+class ILHUInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI16Form<0b010000010, OOL, IOL, "ilhu\t$rT, $val",
+ ImmLoad, pattern>;
+
+class ILHUVecInst<ValueType vectype, Operand immtype, PatLeaf xform>:
+ ILHUInst<(outs VECREG:$rT), (ins immtype:$val),
+ [(set (vectype VECREG:$rT), (vectype xform:$val))]>;
+
+class ILHURegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
+ ILHUInst<(outs rclass:$rT), (ins immtype:$val),
+ [(set rclass:$rT, xform:$val)]>;
+
+multiclass ImmLoadHalfwordUpper
+{
+ def v2i64: ILHUVecInst<v2i64, u16imm_i64, immILHUvec_i64>;
+ def v4i32: ILHUVecInst<v4i32, u16imm_i32, immILHUvec>;
+
+ def r64: ILHURegInst<R64C, u16imm_i64, hi16>;
+ def r32: ILHURegInst<R32C, u16imm_i32, hi16>;
+
+ // Loads the high portion of an address
+ def hi: ILHURegInst<R32C, symbolHi, hi16>;
+
+ // Used in custom lowering constant SFP loads:
+ def f32: ILHURegInst<R32FP, f16imm, hi16_f32>;
+}
+
// Produces ILHUv4i32, ILHUr32, ILHUhi, ILHUf32 (matching the removed defs)
// plus new v2i64/r64 forms.
+defm ILHU : ImmLoadHalfwordUpper;
// Immediate load address (can also be used to load 18-bit unsigned constants,
// see the zext 16->32 pattern)
+
// ILA: immediate load address — 18-bit unsigned immediate ("ila").
// Also usable for 18-bit unsigned constants (see the zext 16->32 pattern).
// The old open-coded multiclass bodies are being replaced by helper
// classes parameterized on type/operand/PatLeaf, mirroring IL and ILHU.
class ILAInst<dag OOL, dag IOL, list<dag> pattern>:
RI18Form<0b1000010, OOL, IOL, "ila\t$rT, $val",
LoadNOP, pattern>;
-multiclass ImmLoadAddress
-{
- def v2i64: ILAInst<(outs VECREG:$rT), (ins u18imm:$val),
- [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>;
-
- def v4i32: ILAInst<(outs VECREG:$rT), (ins u18imm:$val),
- [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>;
+class ILAVecInst<ValueType vectype, Operand immtype, PatLeaf xform>:
+ ILAInst<(outs VECREG:$rT), (ins immtype:$val),
+ [(set (vectype VECREG:$rT), (vectype xform:$val))]>;
- def r64: ILAInst<(outs R64C:$rT), (ins u18imm_i64:$val),
- [(set R64C:$rT, imm18:$val)]>;
+class ILARegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
+ ILAInst<(outs rclass:$rT), (ins immtype:$val),
+ [(set rclass:$rT, xform:$val)]>;
- def r32: ILAInst<(outs R32C:$rT), (ins u18imm:$val),
- [(set R32C:$rT, imm18:$val)]>;
-
- def f32: ILAInst<(outs R32FP:$rT), (ins f18imm:$val),
- [(set R32FP:$rT, fpimm18:$val)]>;
+multiclass ImmLoadAddress
+{
+ def v2i64: ILAVecInst<v2i64, u18imm, v2i64Uns18Imm>;
+ def v4i32: ILAVecInst<v4i32, u18imm, v4i32Uns18Imm>;
- def f64: ILAInst<(outs R64FP:$rT), (ins f18imm_f64:$val),
- [(set R64FP:$rT, fpimm18:$val)]>;
+ def r64: ILARegInst<R64C, u18imm_i64, imm18>;
+ def r32: ILARegInst<R32C, u18imm, imm18>;
+ def f32: ILARegInst<R32FP, f18imm, fpimm18>;
+ def f64: ILARegInst<R64FP, f18imm_f64, fpimm18>;
- def lo: ILAInst<(outs R32C:$rT), (ins symbolLo:$val),
- [(set R32C:$rT, imm18:$val)]>;
+ def lo: ILARegInst<R32C, symbolLo, imm18>;
def lsa: ILAInst<(outs R32C:$rT), (ins symbolLSA:$val),
[/* no pattern */]>;
// Note that these are really two operand instructions, but they're encoded
// as three operands with the first two arguments tied-to each other.
-def IOHLvec:
- RI16Form<0b100000110, (outs VECREG:$rT), (ins VECREG:$rS, u16imm:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* insert intrinsic here */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
-
-def IOHLr32:
- RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, i32imm:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* insert intrinsic here */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
-
-def IOHLf32:
- RI16Form<0b100000110, (outs R32FP:$rT), (ins R32FP:$rS, f32imm:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* insert intrinsic here */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
-
-def IOHLlo:
- RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, symbolLo:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* no pattern */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
// IOHL: immediate OR halfword lower — ORs a 16-bit immediate into the low
// halfword.  Two-operand instruction encoded as three operands with $rS
// tied to $rT (RegConstraint) and not emitted (NoEncode).  No ISel
// patterns: these are emitted by custom lowering (ILHU/IOHL pairs).
+class IOHLInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI16Form<0b100000110, OOL, IOL, "iohl\t$rT, $val",
+ ImmLoad, pattern>,
+ RegConstraint<"$rS = $rT">,
+ NoEncode<"$rS">;
+
+class IOHLVecInst<ValueType vectype, Operand immtype /* , PatLeaf xform */>:
+ IOHLInst<(outs VECREG:$rT), (ins VECREG:$rS, immtype:$val),
+ [/* no pattern */]>;
+
+class IOHLRegInst<RegisterClass rclass, Operand immtype /* , PatLeaf xform */>:
+ IOHLInst<(outs rclass:$rT), (ins rclass:$rS, immtype:$val),
+ [/* no pattern */]>;
+
+multiclass ImmOrHalfwordLower
+{
+ def v2i64: IOHLVecInst<v2i64, u16imm_i64>;
+ def v4i32: IOHLVecInst<v4i32, u16imm_i32>;
+
+ def r32: IOHLRegInst<R32C, i32imm>;
+ def f32: IOHLRegInst<R32FP, f32imm>;
+
+ def lo: IOHLRegInst<R32C, symbolLo>;
+}
+
// NOTE(review): the removed generic IOHLvec is replaced by typed v2i64/
// v4i32 forms; confirm no caller referenced the old IOHLvec name.
+defm IOHL: ImmOrHalfwordLower;
// Form select mask for bytes using immediate, used in conjunction with the
// SELB instruction:
// FSMBI: form select mask for bytes from a 16-bit immediate, used with SELB.
// The pattern node changes SPUfsmbi -> SPUselmask (with an explicit i16
// immediate), unifying all select-mask formation under one SD node.
-class FSMBIVec<ValueType vectype>
- : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
- "fsmbi\t$rT, $val",
- SelectOp,
- [(set (vectype VECREG:$rT), (SPUfsmbi immU16:$val))]>
-{ }
+class FSMBIVec<ValueType vectype>:
+ RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
+ "fsmbi\t$rT, $val",
+ SelectOp,
+ [(set (vectype VECREG:$rT), (SPUselmask (i16 immU16:$val)))]>;
multiclass FormSelectMaskBytesImm
{
// FSMB/FSMH gain SPUselmask patterns (previously pattern-less), so ISel can
// form byte/halfword select masks directly from a 16-bit register.
// fsmb: Form select mask for bytes. N.B. Input operand, $rA, is 16-bits
def FSMB:
RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA),
- "fsmb\t$rT, $rA", SelectOp,
- []>;
+ "fsmb\t$rT, $rA", SelectOp,
+ [(set (v16i8 VECREG:$rT), (SPUselmask R16C:$rA))]>;
// fsmh: Form select mask for halfwords. N.B., Input operand, $rA, is
// only 8-bits wide (even though it's input as 16-bits here)
def FSMH:
RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA),
"fsmh\t$rT, $rA", SelectOp,
- []>;
+ [(set (v8i16 VECREG:$rT), (SPUselmask R16C:$rA))]>;
// fsm: Form select mask for words. Like the other fsm* instructions,
// only the lower 4 bits of $rA are significant.
// FSM: form select mask for words.  The single pattern-less def is replaced
// by a class parameterized on result vector type and input register class,
// expanded for r32/r16 inputs producing v4i32 masks (FSM) and, for i64
// math, v2i64 masks (FSM64).  Only the low 4 bits of $rA are significant.
// NOTE(review): FSMr32/FSM64r32 etc. share one opcode encoding; assumed
// intentional (same hardware instruction, different pattern types).
-def FSM:
- RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA),
- "fsm\t$rT, $rA", SelectOp,
- []>;
+class FSMInst<ValueType vectype, RegisterClass rclass>:
+ RRForm_1<0b00101101100, (outs VECREG:$rT), (ins rclass:$rA),
+ "fsm\t$rT, $rA",
+ SelectOp,
+ [(set (vectype VECREG:$rT), (SPUselmask rclass:$rA))]>;
+
+multiclass FormSelectMaskWord {
+ def r32 : FSMInst<v4i32, R32C>;
+ def r16 : FSMInst<v4i32, R16C>;
+}
+
+defm FSM : FormSelectMaskWord;
+
+// Special case when used for i64 math operations
+multiclass FormSelectMaskWord64 {
+ def r32 : FSMInst<v2i64, R32C>;
+ def r16 : FSMInst<v2i64, R16C>;
+}
+
+defm FSM64 : FormSelectMaskWord64;
//===----------------------------------------------------------------------===//
// Integer and Logical Operations:
// Ar8: 8-bit register add ("a").  The (add R8C, R8C) pattern is dropped —
// presumably i8 adds are now lowered through wider forms; the def is kept
// for the encoding only.  TODO(review): confirm i8 add lowering elsewhere.
def Ar8:
RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
"a\t$rT, $rA, $rB", IntegerOp,
- [(set R8C:$rT, (add R8C:$rA, R8C:$rB))]>;
+ [/* no pattern */]>;
def AIvec:
RI10Form<0b00111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
[(set R32C:$rT, (sub i32ImmSExt10:$val, R32C:$rA))]>;
// ADDX: add extended (with carry-in); refactored below into vector and
// register forms that match the SPUaddx pattern.
-def ADDXvec:
- RRForm<0b00000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "addx\t$rT, $rA, $rB", IntegerOp,
- []>,
// ADDX: add extended — adds $rA + $rB + carry-in ($rCarry, tied to $rT and
// not encoded).  Previously a single pattern-less vector def; now matches
// the SPUaddx node in vector (v2i64/v4i32) and scalar (r64/r32) forms.
+class ADDXInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b00000010110, OOL, IOL,
+ "addx\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class ADDXVecInst<ValueType vectype>:
+ ADDXInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB, VECREG:$rCarry),
+ [(set (vectype VECREG:$rT),
+ (SPUaddx (vectype VECREG:$rA), (vectype VECREG:$rB),
+ (vectype VECREG:$rCarry)))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// CG: only available in vector form, doesn't match a pattern.
-def CGvec:
- RRForm<0b01000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "cg\t$rT, $rA, $rB", IntegerOp,
- []>,
+class ADDXRegInst<RegisterClass rclass>:
+ ADDXInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB, rclass:$rCarry),
+ [(set rclass:$rT,
+ (SPUaddx rclass:$rA, rclass:$rB, rclass:$rCarry))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// SFX: only available in vector form, doesn't match a pattern
-def SFXvec:
- RRForm<0b10000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "sfx\t$rT, $rA, $rB", IntegerOp,
- []>,
+multiclass AddExtended {
+ def v2i64 : ADDXVecInst<v2i64>;
+ def v4i32 : ADDXVecInst<v4i32>;
+ def r64 : ADDXRegInst<R64C>;
+ def r32 : ADDXRegInst<R32C>;
+}
+
+defm ADDX : AddExtended;
+
+// CG: Generate carry for add
// Computes the carry-out of $rA + $rB (no carry-in operand).  Used together
// with ADDX for multi-word addition; matches the SPUcarry_gen node in
// vector (v2i64/v4i32) and scalar (r64/r32) forms.
+class CGInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b01000011000, OOL, IOL,
+ "cg\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class CGVecInst<ValueType vectype>:
+ CGInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB),
+ [(set (vectype VECREG:$rT),
+ (SPUcarry_gen (vectype VECREG:$rA), (vectype VECREG:$rB)))]>;
+
+class CGRegInst<RegisterClass rclass>:
+ CGInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT,
+ (SPUcarry_gen rclass:$rA, rclass:$rB))]>;
+
+multiclass CarryGenerate {
+ def v2i64 : CGVecInst<v2i64>;
+ def v4i32 : CGVecInst<v4i32>;
+ def r64 : CGRegInst<R64C>;
+ def r32 : CGRegInst<R32C>;
+}
+
+defm CG : CarryGenerate;
+
+// SFX: Subtract from, extended. This is used in conjunction with BG to subtract
+// with carry (borrow, in this case)
// SFX: subtract-from extended — $rB - $rA with borrow-in ($rCarry, tied to
// $rT, not encoded).  Matches SPUsubx in vector and scalar forms; paired
// with BG (borrow generate) for multi-word subtraction.
+class SFXInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b10000010110, OOL, IOL,
+ "sfx\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class SFXVecInst<ValueType vectype>:
+ SFXInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB, VECREG:$rCarry),
+ [(set (vectype VECREG:$rT),
+ (SPUsubx (vectype VECREG:$rA), (vectype VECREG:$rB),
+ (vectype VECREG:$rCarry)))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// BG: only available in vector form, doesn't match a pattern.
-def BGvec:
- RRForm<0b01000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "bg\t$rT, $rA, $rB", IntegerOp,
- []>,
+class SFXRegInst<RegisterClass rclass>:
+ SFXInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB, rclass:$rCarry),
+ [(set rclass:$rT,
+ (SPUsubx rclass:$rA, rclass:$rB, rclass:$rCarry))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// BGX: only available in vector form, doesn't match a pattern.
+multiclass SubtractExtended {
+ def v2i64 : SFXVecInst<v2i64>;
+ def v4i32 : SFXVecInst<v4i32>;
+ def r64 : SFXRegInst<R64C>;
+ def r32 : SFXRegInst<R32C>;
+}
+
+defm SFX : SubtractExtended;
+
+// BG: borrow generate; refactored below into vector and register forms
+// that match the SPUborrow_gen pattern (used with SFX).
// BG: borrow generate — computes the borrow-out of a subtract (no
// borrow-in operand); matches SPUborrow_gen in vector and scalar forms.
// Used with SFX for multi-word subtract-with-borrow.
// NOTE(review): the old removed BGvec took a $rCarry operand; the new
// forms take only $rA/$rB — confirm this matches the BG encoding.
+class BGInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b01000010000, OOL, IOL,
+ "bg\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class BGVecInst<ValueType vectype>:
+ BGInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB),
+ [(set (vectype VECREG:$rT),
+ (SPUborrow_gen (vectype VECREG:$rA), (vectype VECREG:$rB)))]>;
+
+class BGRegInst<RegisterClass rclass>:
+ BGInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT,
+ (SPUborrow_gen rclass:$rA, rclass:$rB))]>;
+
+multiclass BorrowGenerate {
+ def v4i32 : BGVecInst<v4i32>;
+ def v2i64 : BGVecInst<v2i64>;
+ def r64 : BGRegInst<R64C>;
+ def r32 : BGRegInst<R32C>;
+}
+
+defm BG : BorrowGenerate;
+
+// BGX: Borrow generate, extended.
def BGXvec:
RRForm<0b11000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
VECREG:$rCarry),
// CNTB: count ones in bytes.  The per-type SD nodes (SPUcntb_v16i8 etc.)
// are collapsed into a single SPUcntb node; the result type in each
// pattern disambiguates the three vector forms.
def CNTBv16i8:
RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
"cntb\t$rT, $rA", IntegerOp,
- [(set (v16i8 VECREG:$rT), (SPUcntb_v16i8 (v16i8 VECREG:$rA)))]>;
+ [(set (v16i8 VECREG:$rT), (SPUcntb (v16i8 VECREG:$rA)))]>;
def CNTBv8i16 :
RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
"cntb\t$rT, $rA", IntegerOp,
- [(set (v8i16 VECREG:$rT), (SPUcntb_v8i16 (v8i16 VECREG:$rA)))]>;
+ [(set (v8i16 VECREG:$rT), (SPUcntb (v8i16 VECREG:$rA)))]>;
def CNTBv4i32 :
RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
"cntb\t$rT, $rA", IntegerOp,
- [(set (v4i32 VECREG:$rT), (SPUcntb_v4i32 (v4i32 VECREG:$rA)))]>;
+ [(set (v4i32 VECREG:$rT), (SPUcntb (v4i32 VECREG:$rA)))]>;
// gbb: Gather all low order bits from each byte in $rA into a single 16-bit
// quantity stored into $rT
[]>;
// Sign extension operations:
// XSBH: extend sign byte to halfword.  Refactored from individual defs
// into a base class + vector/register helpers and a multiclass.
// NOTE(review): `def v16i8: XSBHVecInst<v8i16>` substitutes v8i16 as the
// *source* vectype, yielding (sext (v8i16 ...)) -> v8i16, while the
// removed XSBHvec sign-extended v16i8 -> v8i16.  Looks like the argument
// should be v16i8 — confirm.
// NOTE(review): the r32 comment below says "doesn't match a pattern", but
// XSBHRegInst does attach a sext_inreg pattern — comment and code disagree.
-def XSBHvec:
- RRForm_1<0b01101101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set (v8i16 VECREG:$rDst), (sext (v16i8 VECREG:$rSrc)))]>;
+class XSBHInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm_1<0b01101101010, OOL, IOL,
+ "xsbh\t$rDst, $rSrc",
+ IntegerOp, pattern>;
+
+class XSBHVecInst<ValueType vectype>:
+ XSBHInst<(outs VECREG:$rDst), (ins VECREG:$rSrc),
+ [(set (v8i16 VECREG:$rDst), (sext (vectype VECREG:$rSrc)))]>;
+
+class XSBHRegInst<RegisterClass rclass>:
+ XSBHInst<(outs rclass:$rDst), (ins rclass:$rSrc),
+ [(set rclass:$rDst, (sext_inreg rclass:$rSrc, i8))]>;
+
+multiclass ExtendByteHalfword {
+ def v16i8: XSBHVecInst<v8i16>;
+ def r16: XSBHRegInst<R16C>;
+
+ // 32-bit form for XSBH: used to sign extend 8-bit quantities to 16-bit
+ // quantities to 32-bit quantities via a 32-bit register (see the sext 8->32
+ // pattern below). Intentionally doesn't match a pattern because we want the
+ // sext 8->32 pattern to do the work for us, namely because we need the extra
+ // XSHWr32.
+ def r32: XSBHRegInst<R32C>;
+}
-// Ordinary form for XSBH
-def XSBHr16:
- RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R16C:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set R16C:$rDst, (sext_inreg R16C:$rSrc, i8))]>;
+defm XSBH : ExtendByteHalfword;
+// Sign-extend, but take an 8-bit register to a 16-bit register (not done as
+// sext_inreg)
// XSBHr8 keeps its own def (different in/out register classes than the
// multiclass forms); its body is rewritten to reuse the XSBHInst base.
def XSBHr8:
- RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R8C:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set R16C:$rDst, (sext R8C:$rSrc))]>;
-
-// 32-bit form for XSBH: used to sign extend 8-bit quantities to 16-bit
-// quantities to 32-bit quantities via a 32-bit register (see the sext 8->32
-// pattern below). Intentionally doesn't match a pattern because we want the
-// sext 8->32 pattern to do the work for us, namely because we need the extra
-// XSHWr32.
-def XSBHr32:
- RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i8))]>;
+ XSBHInst<(outs R16C:$rDst), (ins R8C:$rSrc),
+ [(set R16C:$rDst, (sext R8C:$rSrc))]>;
// Sign extend halfwords to words:
def XSHWvec:
[(set (vectype VECREG:$rT), (and (vectype VECREG:$rA),
(vectype VECREG:$rB)))]>;
// ANDRegInst: scalar form of "and", parameterized on register class;
// companion to ANDVecInst for the BitwiseAnd multiclass below.
+class ANDRegInst<RegisterClass rclass>:
+ ANDInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT, (and rclass:$rA, rclass:$rB))]>;
+
multiclass BitwiseAnd
{
def v16i8: ANDVecInst<v16i8>;
def v4i32: ANDVecInst<v4i32>;
def v2i64: ANDVecInst<v2i64>;
- def r64: ANDInst<(outs R64C:$rT), (ins R64C:$rA, R64C:$rB),
- [(set R64C:$rT, (and R64C:$rA, R64C:$rB))]>;
-
- def r32: ANDInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>;
-
- def r16: ANDInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>;
-
- def r8: ANDInst<(outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
- [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>;
+ def r128: ANDRegInst<GPRC>;
+ def r64: ANDRegInst<R64C>;
+ def r32: ANDRegInst<R32C>;
+ def r16: ANDRegInst<R16C>;
+ def r8: ANDRegInst<R8C>;
//===---------------------------------------------
// Special instructions to perform the fabs instruction
[]>;
// XOR:
-def XORv16i8:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;
-def XORv8i16:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;
// XOR: base class plus vector/register helpers replacing the open-coded
// per-type defs; patterns are identical, just parameterized.
+class XORInst<dag OOL, dag IOL, list<dag> pattern> :
+ RRForm<0b10010010000, OOL, IOL, "xor\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
-def XORv4i32:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
+class XORVecInst<ValueType vectype>:
+ XORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+ [(set (vectype VECREG:$rT), (xor (vectype VECREG:$rA),
+ (vectype VECREG:$rB)))]>;
-def XORr32:
- RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set R32C:$rT, (xor R32C:$rA, R32C:$rB))]>;
+class XORRegInst<RegisterClass rclass>:
+ XORInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT, (xor rclass:$rA, rclass:$rB))]>;
-//==----------------------------------------------------------
-// Special forms for floating point instructions.
-// Bitwise ORs and ANDs don't make sense for normal floating
-// point numbers. These operations (fneg and fabs), however,
-// require bitwise logical ops to manipulate the sign bit.
-def XORfneg32:
- RRForm<0b10010010000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [/* Intentionally does not match a pattern, see fneg32 */]>;
-
-// KLUDGY! Better way to do this without a VECREG? bitconvert?
-// VECREG is assumed to contain two identical 64-bit masks, so
-// it doesn't matter which word we select for the xor
-def XORfneg64:
- RRForm<0b10010010000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [/* Intentionally does not match a pattern, see fneg64 */]>;
-
-// Could use XORv4i32, but will use this for clarity
-def XORfnegvec:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [/* Intentionally does not match a pattern, see fneg{32,64} */]>;
// BitwiseExclusiveOr: expands XOR for all vector types, scalar register
// classes (now including r128/GPRC), and the pattern-less floating-point
// sign-bit forms (fneg32/fneg64/fnegvec, formerly XORfneg*) used by the
// custom fneg/fabs lowering.
+multiclass BitwiseExclusiveOr
+{
+ def v16i8: XORVecInst<v16i8>;
+ def v8i16: XORVecInst<v8i16>;
+ def v4i32: XORVecInst<v4i32>;
+ def v2i64: XORVecInst<v2i64>;
-//==----------------------------------------------------------
+ def r128: XORRegInst<GPRC>;
+ def r64: XORRegInst<R64C>;
+ def r32: XORRegInst<R32C>;
+ def r16: XORRegInst<R16C>;
+ def r8: XORRegInst<R8C>;
-def XORr16:
- RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set R16C:$rT, (xor R16C:$rA, R16C:$rB))]>;
+ // Special forms for floating point instructions.
+ // fneg and fabs require bitwise logical ops to manipulate the sign bit.
-def XORr8:
- RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;
+ def fneg32: XORInst<(outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
+ [/* no pattern */]>;
+
+ def fneg64: XORInst<(outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
+ [/* no pattern */]>;
+
+ def fnegvec: XORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+ [/* no pattern, see fneg{32,64} */]>;
+}
+
// NOTE(review): callers of the old XORfneg32/XORfneg64/XORfnegvec names
// must be updated to XORfneg32/... -> XORfneg* equivalents produced here
// (XORfneg32 becomes XORfneg32? no — they become XORfneg32-style
// "XOR" # "fneg32" = XORfneg32); verify lowering code uses the new names.
+defm XOR : BitwiseExclusiveOr;
+
+//==----------------------------------------------------------
class XORBIInst<dag OOL, dag IOL, list<dag> pattern>:
RI10Form<0b01100000, OOL, IOL, "xorbi\t$rT, $rA, $val",
[(set R16C:$rT, (xor R16C:$rA, i16ImmSExt10:$val))]>;
// XORIv4i32: xor with 10-bit immediate.  Operand changes u10imm ->
// s10imm_i32: the pattern leaf v4i32SExt10Imm sign-extends, so the operand
// type is made signed to match.
def XORIv4i32:
- RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
+ RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm_i32:$val),
"xori\t$rT, $rA, $val", IntegerOp,
[(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA),
v4i32SExt10Imm:$val))]>;
"nor\t$rT, $rA, $rB", IntegerOp,
[(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;
-// EQV: Equivalence (1 for each same bit, otherwise 0)
-def EQVv16i8:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set (v16i8 VECREG:$rT), (or (and (v16i8 VECREG:$rA),
- (v16i8 VECREG:$rB)),
- (and (vnot (v16i8 VECREG:$rA)),
- (vnot (v16i8 VECREG:$rB)))))]>;
-
-def : Pat<(xor (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rB))),
- (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(xor (vnot (v16i8 VECREG:$rA)), (v16i8 VECREG:$rB)),
- (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
-
-def EQVv8i16:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set (v8i16 VECREG:$rT), (or (and (v8i16 VECREG:$rA),
- (v8i16 VECREG:$rB)),
- (and (vnot (v8i16 VECREG:$rA)),
- (vnot (v8i16 VECREG:$rB)))))]>;
-
-def : Pat<(xor (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rB))),
- (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(xor (vnot (v8i16 VECREG:$rA)), (v8i16 VECREG:$rB)),
- (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
-
-def EQVv4i32:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set (v4i32 VECREG:$rT), (or (and (v4i32 VECREG:$rA),
- (v4i32 VECREG:$rB)),
- (and (vnot (v4i32 VECREG:$rA)),
- (vnot (v4i32 VECREG:$rB)))))]>;
-
-def : Pat<(xor (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rB))),
- (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(xor (vnot (v4i32 VECREG:$rA)), (v4i32 VECREG:$rB)),
- (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
-
-def EQVr32:
- RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set R32C:$rT, (or (and R32C:$rA, R32C:$rB),
- (and (not R32C:$rA), (not R32C:$rB))))]>;
-
-def : Pat<(xor R32C:$rA, (not R32C:$rB)),
- (EQVr32 R32C:$rA, R32C:$rB)>;
-
-def : Pat<(xor (not R32C:$rA), R32C:$rB),
- (EQVr32 R32C:$rA, R32C:$rB)>;
-
-def EQVr16:
- RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set R16C:$rT, (or (and R16C:$rA, R16C:$rB),
- (and (not R16C:$rA), (not R16C:$rB))))]>;
-
-def : Pat<(xor R16C:$rA, (not R16C:$rB)),
- (EQVr16 R16C:$rA, R16C:$rB)>;
-
-def : Pat<(xor (not R16C:$rA), R16C:$rB),
- (EQVr16 R16C:$rA, R16C:$rB)>;
-
-def EQVr8:
- RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set R8C:$rT, (or (and R8C:$rA, R8C:$rB),
- (and (not R8C:$rA), (not R8C:$rB))))]>;
-
-def : Pat<(xor R8C:$rA, (not R8C:$rB)),
- (EQVr8 R8C:$rA, R8C:$rB)>;
-
-def : Pat<(xor (not R8C:$rA), R8C:$rB),
- (EQVr8 R8C:$rA, R8C:$rB)>;
-
-// gcc optimizes (p & q) | (~p & ~q) -> ~(p | q) | (p & q), so match that
-// pattern also:
-def : Pat<(or (vnot (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
- (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
- (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(or (vnot (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
- (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
- (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(or (vnot (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
- (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
- (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(or (not (or R32C:$rA, R32C:$rB)), (and R32C:$rA, R32C:$rB)),
- (EQVr32 R32C:$rA, R32C:$rB)>;
-
-def : Pat<(or (not (or R16C:$rA, R16C:$rB)), (and R16C:$rA, R16C:$rB)),
- (EQVr16 R16C:$rA, R16C:$rB)>;
-
-def : Pat<(or (not (or R8C:$rA, R8C:$rB)), (and R8C:$rA, R8C:$rB)),
- (EQVr8 R8C:$rA, R8C:$rB)>;
-
// Select bits:
-def SELBv16i8:
- RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- [(set (v16i8 VECREG:$rT),
- (SPUselb (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
- (v16i8 VECREG:$rC)))]>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def SELBv8i16:
- RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- [(set (v8i16 VECREG:$rT),
- (SPUselb (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
- (v8i16 VECREG:$rC)))]>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def SELBv4i32:
- RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- [(set (v4i32 VECREG:$rT),
- (SPUselb (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
- (v4i32 VECREG:$rC)))]>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def SELBr32:
- RRRForm<0b1000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- []>;
-
-// And the various patterns that can be matched... (all 8 of them :-)
-def : Pat<(or (and R32C:$rA, R32C:$rC),
- (and R32C:$rB, (not R32C:$rC))),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rC, R32C:$rA),
- (and R32C:$rB, (not R32C:$rC))),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rA, R32C:$rC),
- (and (not R32C:$rC), R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rC, R32C:$rA),
- (and (not R32C:$rC), R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
- (and R32C:$rB, R32C:$rC)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
- (and R32C:$rC, R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and (not R32C:$rC), R32C:$rA),
- (and R32C:$rB, R32C:$rC)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and (not R32C:$rC), R32C:$rA),
- (and R32C:$rC, R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def SELBr16:
- RRRForm<0b1000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB, R16C:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- []>;
-
-def : Pat<(or (and R16C:$rA, R16C:$rC),
- (and R16C:$rB, (not R16C:$rC))),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
-
-def : Pat<(or (and R16C:$rC, R16C:$rA),
- (and R16C:$rB, (not R16C:$rC))),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
-
-def : Pat<(or (and R16C:$rA, R16C:$rC),
- (and (not R16C:$rC), R16C:$rB)),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
-
-def : Pat<(or (and R16C:$rC, R16C:$rA),
- (and (not R16C:$rC), R16C:$rB)),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
+class SELBInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRRForm<0b1000, OOL, IOL, "selb\t$rT, $rA, $rB, $rC",
+            IntegerOp, pattern>; // Common RRR base for all SELB (select bits) variants below.
-def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
- (and R16C:$rB, R16C:$rC)),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
+class SELBVecInst<ValueType vectype>:
+    SELBInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
+      [(set (vectype VECREG:$rT),
+            (or (and (vectype VECREG:$rC), (vectype VECREG:$rB)),
+                (and (vnot (vectype VECREG:$rC)),
+                     (vectype VECREG:$rA))))]>; // Per this pattern: bits set in $rC select $rB, clear bits select $rA.
-def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
- (and R16C:$rC, R16C:$rB)),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
+class SELBRegInst<RegisterClass rclass>:
+    SELBInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB, rclass:$rC),
+      [(set rclass:$rT,
+            (or (and rclass:$rA, rclass:$rC),
+                (and rclass:$rB, (not rclass:$rC))))]>; // NOTE(review): here set bits in $rC select $rA -- the opposite operand roles from SELBVecInst above (which selects $rB). One of the two must disagree with the hardware selb definition; confirm against the SPU ISA.
-def : Pat<(or (and (not R16C:$rC), R16C:$rA),
- (and R16C:$rB, R16C:$rC)),
- (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
+multiclass SelectBits // Instantiates the SELB vector forms plus scalar forms for every integer register class.
+{
+  def v16i8: SELBVecInst<v16i8>;
+  def v8i16: SELBVecInst<v8i16>;
+  def v4i32: SELBVecInst<v4i32>;
+  def v2i64: SELBVecInst<v2i64>; // v2i64 form is new relative to the removed per-type defs.
-def : Pat<(or (and (not R16C:$rC), R16C:$rA),
-              (and R16C:$rC, R16C:$rB)),
-          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
-
-def SELBr8:
-    RRRForm<0b1000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB, R8C:$rC),
-    "selb\t$rT, $rA, $rB, $rC", IntegerOp,
-    []>;
+  def r128: SELBRegInst<GPRC>; // r128/r64 scalar forms are also new here.
+  def r64: SELBRegInst<R64C>;
+  def r32: SELBRegInst<R32C>;
+  def r16: SELBRegInst<R16C>;
+  def r8: SELBRegInst<R8C>;
+}
-def : Pat<(or (and R8C:$rA, R8C:$rC),
-              (and R8C:$rB, (not R8C:$rC))),
-          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+defm SELB : SelectBits; // Produces SELBv16i8..SELBv2i64 and SELBr128..SELBr8.
-def : Pat<(or (and R8C:$rC, R8C:$rA),
- (and R8C:$rB, (not R8C:$rC))),
- (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+class SPUselbPat<ValueType vectype, SPUInstr inst>:
+    Pat<(SPUselb (vectype VECREG:$rA), (vectype VECREG:$rB), (vectype VECREG:$rC)),
+        (inst VECREG:$rA, VECREG:$rB, VECREG:$rC)>; // Lower the target-specific SPUselb node directly to the matching SELB vector form.
-def : Pat<(or (and R8C:$rA, R8C:$rC),
-              (and (not R8C:$rC), R8C:$rB)),
-          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+def : SPUselbPat<v16i8, SELBv16i8>;
+def : SPUselbPat<v8i16, SELBv8i16>;
+def : SPUselbPat<v4i32, SELBv4i32>;
+def : SPUselbPat<v2i64, SELBv2i64>; // Relies on the v2i64 def inside multiclass SelectBits.
-def : Pat<(or (and R8C:$rC, R8C:$rA),
- (and (not R8C:$rC), R8C:$rB)),
- (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+class SelectConditional<RegisterClass rclass, SPUInstr inst>:
+    Pat<(select rclass:$rCond, rclass:$rTrue, rclass:$rFalse),
+        (inst rclass:$rFalse, rclass:$rTrue, rclass:$rCond)>; // $rTrue is placed in the $rB slot; with selb returning $rB where $rC bits are set, a true condition yields $rTrue.
-def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
-              (and R8C:$rB, R8C:$rC)),
-          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+def : SelectConditional<R32C, SELBr32>;
+def : SelectConditional<R16C, SELBr16>;
+def : SelectConditional<R8C, SELBr8>; // NOTE(review): selb is bitwise, so this assumes $rCond is an all-ones/all-zeros mask (as the CEQ/CGT family produces); a 0/1 boolean would only select the low bit -- confirm.
-def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
- (and R8C:$rC, R8C:$rB)),
- (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+// EQV: Equivalence (1 for each same bit, otherwise 0)
+//
+// Note: There are a lot of ways to match this bit operator and these patterns
+// attempt to be as exhaustive as possible.
-def : Pat<(or (and (not R8C:$rC), R8C:$rA),
- (and R8C:$rB, R8C:$rC)),
- (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+class EQVInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10010010000, OOL, IOL, "eqv\t$rT, $rA, $rB",
+           IntegerOp, pattern>; // EQV: bitwise equivalence -- 1 where the operand bits match.
-def : Pat<(or (and (not R8C:$rC), R8C:$rA),
-              (and R8C:$rB, R8C:$rC)),
-          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+class EQVVecInst<ValueType vectype>:
+    EQVInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+      [(set (vectype VECREG:$rT),
+            (or (and (vectype VECREG:$rA), (vectype VECREG:$rB)),
+                (and (vnot (vectype VECREG:$rA)),
+                     (vnot (vectype VECREG:$rB)))))]>; // Canonical (A & B) | (~A & ~B) form.
+
+class EQVRegInst<RegisterClass rclass>:
+    EQVInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+      [(set rclass:$rT, (or (and rclass:$rA, rclass:$rB),
+                            (and (not rclass:$rA), (not rclass:$rB))))]>;
+
+class EQVVecPattern1<ValueType vectype>:
+    EQVInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+      [(set (vectype VECREG:$rT),
+            (xor (vectype VECREG:$rA), (vnot (vectype VECREG:$rB))))]>; // A ^ ~B is the same function.
+
+class EQVRegPattern1<RegisterClass rclass>:
+    EQVInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+      [(set rclass:$rT, (xor rclass:$rA, (not rclass:$rB)))]>;
+
+class EQVVecPattern2<ValueType vectype>:
+    EQVInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+      [(set (vectype VECREG:$rT),
+            (or (and (vectype VECREG:$rA), (vectype VECREG:$rB)),
+                (vnot (or (vectype VECREG:$rA), (vectype VECREG:$rB)))))]>; // (A & B) | ~(A | B) -- the form gcc rewrites equivalence into.
+
+class EQVRegPattern2<RegisterClass rclass>:
+    EQVInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+      [(set rclass:$rT,
+            (or (and rclass:$rA, rclass:$rB),
+                (not (or rclass:$rA, rclass:$rB))))]>;
+
+class EQVVecPattern3<ValueType vectype>:
+    EQVInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+      [(set (vectype VECREG:$rT),
+            (not (xor (vectype VECREG:$rA), (vectype VECREG:$rB))))]>; // ~(A ^ B), the simplest equivalent form.
+
+class EQVRegPattern3<RegisterClass rclass>:
+    EQVInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+      [(set rclass:$rT, (not (xor rclass:$rA, rclass:$rB)))]>;
+
+multiclass BitEquivalence // One EQV def per type and per equivalent DAG form (base, _1, _2, _3).
+{
+  def v16i8: EQVVecInst<v16i8>;
+  def v8i16: EQVVecInst<v8i16>;
+  def v4i32: EQVVecInst<v4i32>;
+  def v2i64: EQVVecInst<v2i64>;
+
+  def v16i8_1: EQVVecPattern1<v16i8>; // _1: xor-with-complement form.
+  def v8i16_1: EQVVecPattern1<v8i16>;
+  def v4i32_1: EQVVecPattern1<v4i32>;
+  def v2i64_1: EQVVecPattern1<v2i64>;
+
+  def v16i8_2: EQVVecPattern2<v16i8>; // _2: (A & B) | ~(A | B) form.
+  def v8i16_2: EQVVecPattern2<v8i16>;
+  def v4i32_2: EQVVecPattern2<v4i32>;
+  def v2i64_2: EQVVecPattern2<v2i64>;
+
+  def v16i8_3: EQVVecPattern3<v16i8>; // _3: ~(A ^ B) form.
+  def v8i16_3: EQVVecPattern3<v8i16>;
+  def v4i32_3: EQVVecPattern3<v4i32>;
+  def v2i64_3: EQVVecPattern3<v2i64>;
+
+  def r128: EQVRegInst<GPRC>; // Scalar forms for every integer register class.
+  def r64: EQVRegInst<R64C>;
+  def r32: EQVRegInst<R32C>;
+  def r16: EQVRegInst<R16C>;
+  def r8: EQVRegInst<R8C>;
+
+  def r128_1: EQVRegPattern1<GPRC>;
+  def r64_1: EQVRegPattern1<R64C>;
+  def r32_1: EQVRegPattern1<R32C>;
+  def r16_1: EQVRegPattern1<R16C>;
+  def r8_1: EQVRegPattern1<R8C>;
+
+  def r128_2: EQVRegPattern2<GPRC>;
+  def r64_2: EQVRegPattern2<R64C>;
+  def r32_2: EQVRegPattern2<R32C>;
+  def r16_2: EQVRegPattern2<R16C>;
+  def r8_2: EQVRegPattern2<R8C>;
+
+  def r128_3: EQVRegPattern3<GPRC>;
+  def r64_3: EQVRegPattern3<R64C>;
+  def r32_3: EQVRegPattern3<R32C>;
+  def r16_3: EQVRegPattern3<R16C>;
+  def r8_3: EQVRegPattern3<R8C>;
+}
+
+defm EQV: BitEquivalence;
//===----------------------------------------------------------------------===//
// Vector shuffle...
// It's this pattern that's probably the most useful, since SPUISelLowering
// methods create a v16i8 vector for $rC:
-class SHUFBVecPat1<ValueType vectype, SPUInstr inst>:
+class SHUFBVecPat1<ValueType vectype, ValueType masktype, SPUInstr inst>: // masktype generalizes the previously hard-coded v16i8 shuffle mask.
Pat<(SPUshuffle (vectype VECREG:$rA), (vectype VECREG:$rB),
-(v16i8 VECREG:$rC)),
+                (masktype VECREG:$rC)),
(inst VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
multiclass ShuffleBytes
defm SHUFB : ShuffleBytes;
-def : SHUFBVecPat1<v8i16, SHUFBv16i8>;
-def : SHUFBVecPat1<v4i32, SHUFBv16i8>;
-def : SHUFBVecPat1<v2i64, SHUFBv16i8>;
-def : SHUFBVecPat1<v4f32, SHUFBv16i8>;
-def : SHUFBVecPat1<v2f64, SHUFBv16i8>;
+// Shuffle mask is a v16i8 vector
+def : SHUFBVecPat1<v8i16, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v4i32, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v2i64, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v4f32, v16i8, SHUFBv16i8>; // Float vectors shuffle through the same byte-wise instruction.
+def : SHUFBVecPat1<v2f64, v16i8, SHUFBv16i8>;
+
+// Shuffle mask is a v4i32 vector:
+def : SHUFBVecPat1<v8i16, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v4i32, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v2i64, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v4f32, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v2f64, v4i32, SHUFBv4i32>;
//===----------------------------------------------------------------------===//
// Shift and rotate group:
(ROTQBYIv2i64 VECREG:$rA, uimm7:$val)>;
// See ROTQBY note above.
-def ROTQBYBIvec:
- RI7Form<0b00110011100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
- "rotqbybi\t$rT, $rA, $val", RotateShift,
- [/* intrinsic */]>;
+class ROTQBYBIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b00110011100, OOL, IOL,
+            "rotqbybi\t$rT, $rA, $shift",
+            RotateShift, pattern>; // Rotate quadword left by bytes, count taken from a bit-shift amount.
+
+class ROTQBYBIVecInst<ValueType vectype, RegisterClass rclass>:
+    ROTQBYBIInst<(outs VECREG:$rT), (ins VECREG:$rA, rclass:$shift),
+      [(set (vectype VECREG:$rT),
+            (SPUrotbytes_left_bits (vectype VECREG:$rA), rclass:$shift))]>; // NOTE(review): the removed def took a u7imm immediate; this refactor only provides register shift counts -- confirm the immediate form is intentionally dropped.
+
+multiclass RotateQuadByBytesByBitshift {
+  def v16i8_r32: ROTQBYBIVecInst<v16i8, R32C>;
+  def v8i16_r32: ROTQBYBIVecInst<v8i16, R32C>;
+  def v4i32_r32: ROTQBYBIVecInst<v4i32, R32C>;
+  def v2i64_r32: ROTQBYBIVecInst<v2i64, R32C>;
+}
+
+defm ROTQBYBI : RotateQuadByBytesByBitshift;
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// See ROTQBY note above.
defm ROTQMBYI : RotateQuadBytesImm;
-
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// Rotate right and mask by bit count
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
(ROTMAr32 R32C:$rA,
(SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
-def ROTMAIv4i32:
- RRForm<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
- "rotmai\t$rT, $rA, $val", RotateShift,
- [(set (v4i32 VECREG:$rT),
- (SPUvec_sra VECREG:$rA, (i32 uimm7:$val)))]>;
+class ROTMAIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b01011110000, OOL, IOL,
+           "rotmai\t$rT, $rA, $val",
+           RotateShift, pattern>; // Rotate-and-mask algebraic immediate: implements arithmetic shift right.
-def : Pat<(SPUvec_sra VECREG:$rA, (i16 uimm7:$val)),
-          (ROTMAIv4i32 VECREG:$rA, uimm7:$val)>;
+class ROTMAIVecInst<ValueType vectype, Operand intop, ValueType inttype>:
+    ROTMAIInst<(outs VECREG:$rT), (ins VECREG:$rA, intop:$val),
+      [(set (vectype VECREG:$rT),
+            (SPUvec_sra VECREG:$rA, (inttype uimm7:$val)))]>;
-def ROTMAIr32:
-    RRForm<0b01011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
-    "rotmai\t$rT, $rA, $val", RotateShift,
-    [(set R32C:$rT, (sra R32C:$rA, (i32 uimm7:$val)))]>;
+class ROTMAIRegInst<RegisterClass rclass, Operand intop, ValueType inttype>:
+    ROTMAIInst<(outs rclass:$rT), (ins rclass:$rA, intop:$val),
+      [(set rclass:$rT, (sra rclass:$rA, (inttype uimm7:$val)))]>;
-def : Pat<(sra R32C:$rA, (i16 uimm7:$val)),
-          (ROTMAIr32 R32C:$rA, uimm7:$val)>;
+multiclass RotateMaskAlgebraicImm {
+  def v2i64_i32 : ROTMAIVecInst<v2i64, rotNeg7imm, i32>; // v2i64/r64 forms are new here.
+  def v4i32_i32 : ROTMAIVecInst<v4i32, rotNeg7imm, i32>;
+  def r64_i32 : ROTMAIRegInst<R64C, rotNeg7imm, i32>;
+  def r32_i32 : ROTMAIRegInst<R32C, rotNeg7imm, i32>;
+}
-def : Pat<(sra R32C:$rA, (i8 uimm7:$val)),
-          (ROTMAIr32 R32C:$rA, uimm7:$val)>;
+defm ROTMAI : RotateMaskAlgebraicImm; // NOTE(review): the removed i16/i8 shift-amount patterns are not re-added; only i32 amounts match now -- confirm intended.
//===----------------------------------------------------------------------===//
// Branch and conditionals:
v16i8SExt8Imm:$val))]>;
def r8:
CGTBIInst<(outs R8C:$rT), (ins R8C:$rA, s10imm_i8:$val),
- [(set R8C:$rT, (setgt R8C:$rA, immSExt8:$val))]>;
+ [(set R8C:$rT, (setgt R8C:$rA, immSExt8:$val))]>;
}
class CGTHInst<dag OOL, dag IOL, list<dag> pattern> :
}
class CLGTBInst<dag OOL, dag IOL, list<dag> pattern> :
- RRForm<0b00001011010, OOL, IOL, "cgtb\t$rT, $rA, $rB",
+ RRForm<0b00001011010, OOL, IOL, "clgtb\t$rT, $rA, $rB",
ByteOp, pattern>;
multiclass CmpLGtrByte
}
class CLGTBIInst<dag OOL, dag IOL, list<dag> pattern> :
- RI10Form<0b01111010, OOL, IOL, "cgtbi\t$rT, $rA, $val",
+ RI10Form<0b01111010, OOL, IOL, "clgtbi\t$rT, $rA, $val",
ByteOp, pattern>;
multiclass CmpLGtrByteImm
}
class CLGTHInst<dag OOL, dag IOL, list<dag> pattern> :
- RRForm<0b00010011010, OOL, IOL, "cgth\t$rT, $rA, $rB",
+ RRForm<0b00010011010, OOL, IOL, "clgth\t$rT, $rA, $rB",
ByteOp, pattern>;
multiclass CmpLGtrHalfword
}
class CLGTHIInst<dag OOL, dag IOL, list<dag> pattern> :
- RI10Form<0b10111010, OOL, IOL, "cgthi\t$rT, $rA, $val",
+ RI10Form<0b10111010, OOL, IOL, "clgthi\t$rT, $rA, $val",
ByteOp, pattern>;
multiclass CmpLGtrHalfwordImm
}
class CLGTInst<dag OOL, dag IOL, list<dag> pattern> :
- RRForm<0b00000011010, OOL, IOL, "cgt\t$rT, $rA, $rB",
+ RRForm<0b00000011010, OOL, IOL, "clgt\t$rT, $rA, $rB",
ByteOp, pattern>;
multiclass CmpLGtrWord
}
class CLGTIInst<dag OOL, dag IOL, list<dag> pattern> :
- RI10Form<0b00111010, OOL, IOL, "cgti\t$rT, $rA, $val",
+ RI10Form<0b00111010, OOL, IOL, "clgti\t$rT, $rA, $val",
ByteOp, pattern>;
multiclass CmpLGtrWordImm
(v4i32 v4i32SExt16Imm:$val)))]>;
def r32: CLGTIInst<(outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
- [(set R32C:$rT, (setugt R32C:$rA, i32ImmSExt10:$val))]>;
+ [(set R32C:$rT, (setugt R32C:$rA, i32ImmSExt10:$val))]>;
}
defm CEQB : CmpEqualByte;
defm CLGT : CmpLGtrWord;
defm CLGTI : CmpLGtrWordImm;
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// For SETCC primitives not supported above (setlt, setle, setge, etc.)
// define a pattern to generate the right code, as a binary operator
// (in a manner of speaking.)
+//
+// N.B.: This only matches the setcc set of conditionals. Special pattern
+// matching is used for select conditionals.
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+
+class SETCCNegCondReg<PatFrag cond, RegisterClass rclass, ValueType inttype,
+                      SPUInstr xorinst, SPUInstr cmpare>:
+    Pat<(cond rclass:$rA, rclass:$rB),
+        (xorinst (cmpare rclass:$rA, rclass:$rB), (inttype -1))>; // cond = NOT(inverse compare): run cmpare, then flip all bits via xor -1.
+
+class SETCCNegCondImm<PatFrag cond, RegisterClass rclass, ValueType inttype,
+                      PatLeaf immpred, SPUInstr xorinst, SPUInstr cmpare>:
+    Pat<(cond rclass:$rA, (inttype immpred:$imm)),
+        (xorinst (cmpare rclass:$rA, (inttype immpred:$imm)), (inttype -1))>; // Immediate-operand variant: cmpare must be the immediate-form compare.
+
+def : SETCCNegCondReg<setne, R8C, i8, XORBIr8, CEQBr8>; // setne = NOT(CEQ).
+def : SETCCNegCondImm<setne, R8C, i8, immSExt8, XORBIr8, CEQBIr8>;
-class SETCCNegCond<PatFrag cond, RegisterClass rclass, dag pattern>:
-    Pat<(cond rclass:$rA, rclass:$rB), pattern>;
+def : SETCCNegCondReg<setne, R16C, i16, XORHIr16, CEQHr16>;
+def : SETCCNegCondImm<setne, R16C, i16, i16ImmSExt10, XORHIr16, CEQHIr16>;
+
+def : SETCCNegCondReg<setne, R32C, i32, XORIr32, CEQr32>;
+def : SETCCNegCondImm<setne, R32C, i32, i32ImmSExt10, XORIr32, CEQIr32>;
class SETCCBinOpReg<PatFrag cond, RegisterClass rclass,
SPUInstr binop, SPUInstr cmpOp1, SPUInstr cmpOp2>:
(binop (cmpOp1 rclass:$rA, (immtype immpred:$imm)),
(cmpOp2 rclass:$rA, (immtype immpred:$imm)))>;
-def CGTEQBr8: SETCCBinOpReg<setge, R8C, ORr8, CGTBr8, CEQBr8>;
-def CGTEQBIr8: SETCCBinOpImm<setge, R8C, immSExt8, i8, ORr8, CGTBIr8, CEQBIr8>;
-def CLTBr8: SETCCBinOpReg<setlt, R8C, NORr8, CGTBr8, CEQBr8>;
-def CLTBIr8: SETCCBinOpImm<setlt, R8C, immSExt8, i8, NORr8, CGTBIr8, CEQBIr8>;
-def CLTEQr8: Pat<(setle R8C:$rA, R8C:$rB),
- (XORBIr8 (CGTBIr8 R8C:$rA, R8C:$rB), 0xff)>;
-def CLTEQIr8: Pat<(setle R8C:$rA, immU8:$imm),
- (XORBIr8 (CGTBIr8 R8C:$rA, immU8:$imm), 0xff)>;
-
-def CGTEQHr16: SETCCBinOpReg<setge, R16C, ORr16, CGTHr16, CEQHr16>;
-def CGTEQHIr16: SETCCBinOpImm<setge, R16C, i16ImmUns10, i16,
- ORr16, CGTHIr16, CEQHIr16>;
-def CLTEQr16: Pat<(setle R16C:$rA, R16C:$rB),
- (XORHIr16 (CGTHIr16 R16C:$rA, R16C:$rB), 0xffff)>;
-def CLTEQIr16: Pat<(setle R16C:$rA, i16ImmUns10:$imm),
- (XORHIr16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>;
-
-
-def CGTEQHr32: SETCCBinOpReg<setge, R32C, ORr32, CGTr32, CEQr32>;
-def CGTEQHIr32: SETCCBinOpImm<setge, R32C, i32ImmUns10, i32,
- ORr32, CGTIr32, CEQIr32>;
-def CLTEQr32: Pat<(setle R32C:$rA, R32C:$rB),
- (XORIr32 (CGTIr32 R32C:$rA, R32C:$rB), 0xffffffff)>;
-def CLTEQIr32: Pat<(setle R32C:$rA, i32ImmUns10:$imm),
- (XORIr32 (CGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>;
-
-def CLGTEQBr8: SETCCBinOpReg<setuge, R8C, ORr8, CLGTBr8, CEQBr8>;
-def CLGTEQBIr8: SETCCBinOpImm<setuge, R8C, immSExt8, i8, ORr8, CLGTBIr8, CEQBIr8>;
-def CLLTBr8: SETCCBinOpReg<setult, R8C, NORr8, CLGTBr8, CEQBr8>;
-def CLLTBIr8: SETCCBinOpImm<setult, R8C, immSExt8, i8, NORr8, CLGTBIr8, CEQBIr8>;
-def CLLTEQr8: Pat<(setule R8C:$rA, R8C:$rB),
- (XORBIr8 (CLGTBIr8 R8C:$rA, R8C:$rB), 0xff)>;
-def CLLTEQIr8: Pat<(setule R8C:$rA, immU8:$imm),
- (XORBIr8 (CLGTBIr8 R8C:$rA, immU8:$imm), 0xff)>;
-
-def CLGTEQHr16: SETCCBinOpReg<setuge, R16C, ORr16, CLGTHr16, CEQHr16>;
-def CLGTEQHIr16: SETCCBinOpImm<setuge, R16C, i16ImmUns10, i16,
- ORr16, CLGTHIr16, CEQHIr16>;
-def CLLTEQr16: Pat<(setule R16C:$rA, R16C:$rB),
- (XORHIr16 (CLGTHIr16 R16C:$rA, R16C:$rB), 0xffff)>;
-def CLLTEQIr16: Pat<(setule R16C:$rA, i16ImmUns10:$imm),
- (XORHIr16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>;
-
-
-def CLGTEQHr32: SETCCBinOpReg<setuge, R32C, ORr32, CLGTr32, CEQr32>;
-def CLGTEQHIr32: SETCCBinOpImm<setuge, R32C, i32ImmUns10, i32,
- ORr32, CLGTIr32, CEQIr32>;
-def CLLTEQr32: Pat<(setule R32C:$rA, R32C:$rB),
- (XORIr32 (CLGTIr32 R32C:$rA, R32C:$rB), 0xffffffff)>;
-def CLLTEQIr32: Pat<(setule R32C:$rA, i32ImmUns10:$imm),
- (XORIr32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>;
+def : SETCCBinOpReg<setge, R8C, ORr8, CGTBr8, CEQBr8>; // setge = (a > b) | (a == b).
+def : SETCCBinOpImm<setge, R8C, immSExt8, i8, ORr8, CGTBIr8, CEQBIr8>;
+def : SETCCBinOpReg<setlt, R8C, NORr8, CGTBr8, CEQBr8>; // setlt = NOR(gt, eq).
+def : SETCCBinOpImm<setlt, R8C, immSExt8, i8, NORr8, CGTBIr8, CEQBIr8>;
+def : Pat<(setle R8C:$rA, R8C:$rB),
+          (XORBIr8 (CGTBr8 R8C:$rA, R8C:$rB), 0xff)>; // setle = NOT(gt); register compare here (the removed code misused the immediate form).
+def : Pat<(setle R8C:$rA, immU8:$imm),
+          (XORBIr8 (CGTBIr8 R8C:$rA, immU8:$imm), 0xff)>;
+
+def : SETCCBinOpReg<setge, R16C, ORr16, CGTHr16, CEQHr16>;
+def : SETCCBinOpImm<setge, R16C, i16ImmSExt10, i16,
+                    ORr16, CGTHIr16, CEQHIr16>;
+def : SETCCBinOpReg<setlt, R16C, NORr16, CGTHr16, CEQHr16>;
+def : SETCCBinOpImm<setlt, R16C, i16ImmSExt10, i16, NORr16, CGTHIr16, CEQHIr16>;
+def : Pat<(setle R16C:$rA, R16C:$rB),
+          (XORHIr16 (CGTHr16 R16C:$rA, R16C:$rB), 0xffff)>;
+def : Pat<(setle R16C:$rA, i16ImmSExt10:$imm),
+          (XORHIr16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>;
+
+def : SETCCBinOpReg<setge, R32C, ORr32, CGTr32, CEQr32>;
+def : SETCCBinOpImm<setge, R32C, i32ImmSExt10, i32,
+                    ORr32, CGTIr32, CEQIr32>;
+def : SETCCBinOpReg<setlt, R32C, NORr32, CGTr32, CEQr32>;
+def : SETCCBinOpImm<setlt, R32C, i32ImmSExt10, i32, NORr32, CGTIr32, CEQIr32>;
+def : Pat<(setle R32C:$rA, R32C:$rB),
+          (XORIr32 (CGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>;
+def : Pat<(setle R32C:$rA, i32ImmSExt10:$imm),
+          (XORIr32 (CGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>;
+
+def : SETCCBinOpReg<setuge, R8C, ORr8, CLGTBr8, CEQBr8>; // Unsigned rows mirror the signed ones using the CLGT (logical greater-than) family.
+def : SETCCBinOpImm<setuge, R8C, immSExt8, i8, ORr8, CLGTBIr8, CEQBIr8>;
+def : SETCCBinOpReg<setult, R8C, NORr8, CLGTBr8, CEQBr8>;
+def : SETCCBinOpImm<setult, R8C, immSExt8, i8, NORr8, CLGTBIr8, CEQBIr8>;
+def : Pat<(setule R8C:$rA, R8C:$rB),
+          (XORBIr8 (CLGTBr8 R8C:$rA, R8C:$rB), 0xff)>;
+def : Pat<(setule R8C:$rA, immU8:$imm),
+          (XORBIr8 (CLGTBIr8 R8C:$rA, immU8:$imm), 0xff)>;
+
+def : SETCCBinOpReg<setuge, R16C, ORr16, CLGTHr16, CEQHr16>;
+def : SETCCBinOpImm<setuge, R16C, i16ImmSExt10, i16,
+                    ORr16, CLGTHIr16, CEQHIr16>;
+def : SETCCBinOpReg<setult, R16C, NORr16, CLGTHr16, CEQHr16>;
+def : SETCCBinOpImm<setult, R16C, i16ImmSExt10, i16, NORr16,
+                    CLGTHIr16, CEQHIr16>;
+def : Pat<(setule R16C:$rA, R16C:$rB),
+          (XORHIr16 (CLGTHr16 R16C:$rA, R16C:$rB), 0xffff)>;
+def : Pat<(setule R16C:$rA, i16ImmSExt10:$imm),
+          (XORHIr16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>;
+
+def : SETCCBinOpReg<setuge, R32C, ORr32, CLGTr32, CEQr32>;
+def : SETCCBinOpImm<setuge, R32C, i32ImmSExt10, i32,
+                    ORr32, CLGTIr32, CEQIr32>;
+def : SETCCBinOpReg<setult, R32C, NORr32, CLGTr32, CEQr32>;
+def : SETCCBinOpImm<setult, R32C, i32ImmSExt10, i32, NORr32, CLGTIr32, CEQIr32>;
+def : Pat<(setule R32C:$rA, R32C:$rB),
+          (XORIr32 (CLGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>;
+def : Pat<(setule R32C:$rA, i32ImmSExt10:$imm),
+          (XORIr32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// select conditional patterns:
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+
+class SELECTNegCondReg<PatFrag cond, RegisterClass rclass, ValueType inttype,
+                       SPUInstr selinstr, SPUInstr cmpare>:
+    Pat<(select (inttype (cond rclass:$rA, rclass:$rB)),
+                rclass:$rTrue, rclass:$rFalse),
+        (selinstr rclass:$rTrue, rclass:$rFalse,
+                  (cmpare rclass:$rA, rclass:$rB))>; // cmpare is the INVERSE of cond (e.g. CEQ for setne), so the false value sits in the $rB slot that selb picks on a set mask.
+
+class SELECTNegCondImm<PatFrag cond, RegisterClass rclass, ValueType inttype,
+                       PatLeaf immpred, SPUInstr selinstr, SPUInstr cmpare>:
+    Pat<(select (inttype (cond rclass:$rA, immpred:$imm)),
+                rclass:$rTrue, rclass:$rFalse),
+        (selinstr rclass:$rTrue, rclass:$rFalse,
+                  (cmpare rclass:$rA, immpred:$imm))>; // Immediate variant: cmpare must be the immediate-form compare instruction.
+
+def : SELECTNegCondReg<setne, R8C, i8, SELBr8, CEQBr8>; // select on setne/setle/setule folds into SELB with the inverse compare as mask.
+def : SELECTNegCondImm<setne, R8C, i8, immSExt8, SELBr8, CEQBIr8>;
+def : SELECTNegCondReg<setle, R8C, i8, SELBr8, CGTBr8>;
+def : SELECTNegCondImm<setle, R8C, i8, immSExt8, SELBr8, CGTBIr8>; // Fixed: was CGTBr8 (register-form compare); SELECTNegCondImm feeds an immediate to cmpare, so the immediate form is required, matching the i16/i32 rows below.
+def : SELECTNegCondReg<setule, R8C, i8, SELBr8, CLGTBr8>;
+def : SELECTNegCondImm<setule, R8C, i8, immU8, SELBr8, CLGTBIr8>;
+
+def : SELECTNegCondReg<setne, R16C, i16, SELBr16, CEQHr16>;
+def : SELECTNegCondImm<setne, R16C, i16, i16ImmSExt10, SELBr16, CEQHIr16>;
+def : SELECTNegCondReg<setle, R16C, i16, SELBr16, CGTHr16>;
+def : SELECTNegCondImm<setle, R16C, i16, i16ImmSExt10, SELBr16, CGTHIr16>;
+def : SELECTNegCondReg<setule, R16C, i16, SELBr16, CLGTHr16>;
+def : SELECTNegCondImm<setule, R16C, i16, i16ImmSExt10, SELBr16, CLGTHIr16>;
+
+def : SELECTNegCondReg<setne, R32C, i32, SELBr32, CEQr32>;
+def : SELECTNegCondImm<setne, R32C, i32, i32ImmSExt10, SELBr32, CEQIr32>;
+def : SELECTNegCondReg<setle, R32C, i32, SELBr32, CGTr32>;
+def : SELECTNegCondImm<setle, R32C, i32, i32ImmSExt10, SELBr32, CGTIr32>;
+def : SELECTNegCondReg<setule, R32C, i32, SELBr32, CLGTr32>;
+def : SELECTNegCondImm<setule, R32C, i32, i32ImmSExt10, SELBr32, CLGTIr32>;
+
+class SELECTBinOpReg<PatFrag cond, RegisterClass rclass, ValueType inttype,
+                     SPUInstr selinstr, SPUInstr binop, SPUInstr cmpOp1,
+                     SPUInstr cmpOp2>:
+    Pat<(select (inttype (cond rclass:$rA, rclass:$rB)),
+                rclass:$rFalse, rclass:$rTrue),
+        (selinstr rclass:$rTrue, rclass:$rFalse,
+                  (binop (cmpOp1 rclass:$rA, rclass:$rB),
+                         (cmpOp2 rclass:$rA, rclass:$rB)))>; // cond is built from two compares joined by binop (e.g. setge = OR(CGT, CEQ)).
+
+class SELECTBinOpImm<PatFrag cond, RegisterClass rclass, PatLeaf immpred,
+                     ValueType inttype,
+                     SPUInstr selinstr, SPUInstr binop, SPUInstr cmpOp1,
+                     SPUInstr cmpOp2>:
+    Pat<(select (inttype (cond rclass:$rA, (inttype immpred:$imm))),
+                rclass:$rTrue, rclass:$rFalse),
+        (selinstr rclass:$rFalse, rclass:$rTrue,
+                  (binop (cmpOp1 rclass:$rA, (inttype immpred:$imm)),
+                         (cmpOp2 rclass:$rA, (inttype immpred:$imm))))>; // NOTE(review): $rTrue/$rFalse appear transposed relative to SELECTBinOpReg; both place the value selected on a true condition in the selb $rB slot, but the naming inversion deserves a cleanup -- confirm both variants are exercised by tests.
+
+def : SELECTBinOpReg<setge, R8C, i8, SELBr8, ORr8, CGTBr8, CEQBr8>;
+def : SELECTBinOpImm<setge, R8C, immSExt8, i8,
+                     SELBr8, ORr8, CGTBIr8, CEQBIr8>;
+
+def : SELECTBinOpReg<setge, R16C, i16, SELBr16, ORr16, CGTHr16, CEQHr16>;
+def : SELECTBinOpImm<setge, R16C, i16ImmSExt10, i16,
+                     SELBr16, ORr16, CGTHIr16, CEQHIr16>;
+
+def : SELECTBinOpReg<setge, R32C, i32, SELBr32, ORr32, CGTr32, CEQr32>;
+def : SELECTBinOpImm<setge, R32C, i32ImmSExt10, i32,
+                     SELBr32, ORr32, CGTIr32, CEQIr32>;
+
+def : SELECTBinOpReg<setuge, R8C, i8, SELBr8, ORr8, CLGTBr8, CEQBr8>;
+def : SELECTBinOpImm<setuge, R8C, immSExt8, i8,
+                     SELBr8, ORr8, CLGTBIr8, CEQBIr8>;
+
+def : SELECTBinOpReg<setuge, R16C, i16, SELBr16, ORr16, CLGTHr16, CEQHr16>;
+def : SELECTBinOpImm<setuge, R16C, i16ImmUns10, i16,
+                     SELBr16, ORr16, CLGTHIr16, CEQHIr16>; // Unsigned rows use the unsigned 10-bit immediate leaf.
+
+def : SELECTBinOpReg<setuge, R32C, i32, SELBr32, ORr32, CLGTr32, CEQr32>;
+def : SELECTBinOpImm<setuge, R32C, i32ImmUns10, i32,
+                     SELBr32, ORr32, CLGTIr32, CEQIr32>;
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// General constant 32-bit vectors
def : Pat<(v4i32 v4i32Imm:$imm),
-(IOHLvec (v4i32 (ILHUv4i32 (HI16_vec v4i32Imm:$imm))),
-(LO16_vec v4i32Imm:$imm))>;
+          (IOHLv4i32 (v4i32 (ILHUv4i32 (HI16_vec v4i32Imm:$imm))),
+                     (LO16_vec v4i32Imm:$imm))>; // ILHU loads the high halfwords, IOHL ors in the lows; uses the renamed IOHLv4i32 def.
// 8-bit constants
def : Pat<(i8 imm:$imm),
(SPUlo tconstpool:$in, 0)),
(IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;
+def : Pat<(SPUindirect R32C:$sp, i32ImmSExt10:$imm),
+          (AIr32 R32C:$sp, i32ImmSExt10:$imm)>; // Small (signed 10-bit) offsets fold into a single add-immediate.
+
+def : Pat<(SPUindirect R32C:$sp, imm:$imm),
+          (Ar32 R32C:$sp,
+                (IOHLr32 (ILHUr32 (HI16 imm:$imm)), (LO16 imm:$imm)))>; // Larger offsets: materialize the constant via ILHU/IOHL, then add.
+
def : Pat<(add (SPUhi tglobaladdr:$in, 0), (SPUlo tglobaladdr:$in, 0)),
(IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;