Vector = 13, // This is an abstract vector type, which will
// be expanded into a target vector type, or scalars
// if no matching vector type is available.
- v16i8 = 14, // 16 x i8
- v8i16 = 15, // 8 x i16
- v4i32 = 16, // 4 x i32
- v2i64 = 17, // 2 x i64
+ v8i8 = 14, // 8 x i8
+ v4i16 = 15, // 4 x i16
+ v2i32 = 16, // 2 x i32
+ v16i8 = 17, // 16 x i8
+ v8i16 = 18, // 8 x i16
+ v4i32 = 19, // 4 x i32
+ v2i64 = 20, // 2 x i64
- v4f32 = 18, // 4 x f32
- v2f64 = 19, // 2 x f64
+ v4f32 = 21, // 4 x f32
+ v2f64 = 22, // 2 x f64
LAST_VALUETYPE, // This always remains at the end of the list.
};
case MVT::f32 :
case MVT::i32 : return 32;
case MVT::f64 :
- case MVT::i64 : return 64;
+ case MVT::i64 :
+ case MVT::v8i8:
+ case MVT::v4i16:
+ case MVT::v2i32:return 64;
case MVT::f80 : return 80;
case MVT::f128:
case MVT::i128:
def FlagVT : ValueType<0 , 11>; // Condition code or machine flag
def isVoid : ValueType<0 , 12>; // Produces no value
def Vector : ValueType<0 , 13>; // Abstract vector value
-def v16i8 : ValueType<128, 14>; // 16 x i8 vector value
-def v8i16 : ValueType<128, 15>; // 8 x i16 vector value
-def v4i32 : ValueType<128, 16>; // 4 x i32 vector value
-def v2i64 : ValueType<128, 17>; // 2 x i64 vector value
-def v4f32 : ValueType<128, 18>; // 4 x f32 vector value
-def v2f64 : ValueType<128, 19>; // 2 x f64 vector value
+def v8i8 : ValueType<64 , 14>; // 8 x i8 vector value
+def v4i16 : ValueType<64 , 15>; // 4 x i16 vector value
+def v2i32 : ValueType<64 , 16>; // 2 x i32 vector value
+def v16i8 : ValueType<128, 17>; // 16 x i8 vector value
+def v8i16 : ValueType<128, 18>; // 8 x i16 vector value
+def v4i32 : ValueType<128, 19>; // 4 x i32 vector value
+def v2i64 : ValueType<128, 20>; // 2 x i64 vector value
+def v4f32 : ValueType<128, 21>; // 4 x f32 vector value
+def v2f64 : ValueType<128, 22>; // 2 x f64 vector value
//===----------------------------------------------------------------------===//
// Register file description - These classes are used to fill in the target
// XMM Packed Floating point support (requires SSE / SSE2)
//===----------------------------------------------------------------------===//
-def MOVAPSrr : I<0x28, MRMSrcReg, (ops V4F4:$dst, V4F4:$src),
+def MOVAPSrr : I<0x28, MRMSrcReg, (ops V4F32:$dst, V4F32:$src),
"movaps {$src, $dst|$dst, $src}", []>,
Requires<[HasSSE1]>, TB;
-def MOVAPDrr : I<0x28, MRMSrcReg, (ops V2F8:$dst, V2F8:$src),
+def MOVAPDrr : I<0x28, MRMSrcReg, (ops V2F64:$dst, V2F64:$src),
"movapd {$src, $dst|$dst, $src}", []>,
Requires<[HasSSE2]>, TB, OpSize;
-def MOVAPSrm : I<0x28, MRMSrcMem, (ops V4F4:$dst, f128mem:$src),
+def MOVAPSrm : I<0x28, MRMSrcMem, (ops V4F32:$dst, f128mem:$src),
"movaps {$src, $dst|$dst, $src}", []>,
Requires<[HasSSE1]>, TB;
-def MOVAPSmr : I<0x29, MRMDestMem, (ops f128mem:$dst, V4F4:$src),
+def MOVAPSmr : I<0x29, MRMDestMem, (ops f128mem:$dst, V4F32:$src),
"movaps {$src, $dst|$dst, $src}",[]>,
Requires<[HasSSE1]>, TB;
-def MOVAPDrm : I<0x28, MRMSrcMem, (ops V2F8:$dst, f128mem:$src),
+def MOVAPDrm : I<0x28, MRMSrcMem, (ops V2F64:$dst, f128mem:$src),
"movapd {$src, $dst|$dst, $src}", []>,
Requires<[HasSSE1]>, TB, OpSize;
-def MOVAPDmr : I<0x29, MRMDestMem, (ops f128mem:$dst, V2F8:$src),
+def MOVAPDmr : I<0x29, MRMDestMem, (ops f128mem:$dst, V2F64:$src),
"movapd {$src, $dst|$dst, $src}",[]>,
Requires<[HasSSE2]>, TB, OpSize;
// Alias instructions to do FR32 / FR64 reg-to-reg copy using movaps / movapd.
// Upper bits are disregarded.
-def FsMOVAPSrr : I<0x28, MRMSrcReg, (ops V4F4:$dst, V4F4:$src),
+def FsMOVAPSrr : I<0x28, MRMSrcReg, (ops V4F32:$dst, V4F32:$src),
"movaps {$src, $dst|$dst, $src}", []>,
Requires<[HasSSE1]>, TB;
-def FsMOVAPDrr : I<0x28, MRMSrcReg, (ops V2F8:$dst, V2F8:$src),
+def FsMOVAPDrr : I<0x28, MRMSrcReg, (ops V2F64:$dst, V2F64:$src),
"movapd {$src, $dst|$dst, $src}", []>,
Requires<[HasSSE2]>, TB, OpSize;
Opc = X86::MOVSSmr;
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDmr;
- } else if (RC == &X86::V4F4RegClass) {
+ } else if (RC == &X86::V4F32RegClass) {
Opc = X86::MOVAPSmr;
- } else if (RC == &X86::V2F8RegClass) {
+ } else if (RC == &X86::V2F64RegClass) {
Opc = X86::MOVAPDmr;
} else {
assert(0 && "Unknown regclass");
Opc = X86::MOVSSrm;
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDrm;
- } else if (RC == &X86::V4F4RegClass) {
+ } else if (RC == &X86::V4F32RegClass) {
Opc = X86::MOVAPSrm;
- } else if (RC == &X86::V2F8RegClass) {
+ } else if (RC == &X86::V2F64RegClass) {
Opc = X86::MOVAPDrm;
} else {
assert(0 && "Unknown regclass");
Opc = X86::MOV16rr;
} else if (RC == &X86::RFPRegClass || RC == &X86::RSTRegClass) {
Opc = X86::FpMOV;
- } else if (RC == &X86::FR32RegClass || RC == &X86::V4F4RegClass) {
+ } else if (RC == &X86::FR32RegClass || RC == &X86::V4F32RegClass) {
Opc = X86::FsMOVAPSrr;
- } else if (RC == &X86::FR64RegClass || RC == &X86::V2F8RegClass) {
+ } else if (RC == &X86::FR64RegClass || RC == &X86::V2F64RegClass) {
Opc = X86::FsMOVAPDrr;
} else {
assert(0 && "Unknown regclass");
def DL : RegisterGroup<"DL", [DX,EDX]>; def BL : RegisterGroup<"BL",[BX,EBX]>;
def AH : RegisterGroup<"AH", [AX,EAX]>; def CH : RegisterGroup<"CH",[CX,ECX]>;
def DH : RegisterGroup<"DH", [DX,EDX]>; def BH : RegisterGroup<"BH",[BX,EBX]>;
+
+ // MMX Registers. These are actually aliased to ST0 .. ST7
+ def MM0 : Register<"MM0">; def MM1 : Register<"MM1">;
+ def MM2 : Register<"MM2">; def MM3 : Register<"MM3">;
+ def MM4 : Register<"MM4">; def MM5 : Register<"MM5">;
+ def MM6 : Register<"MM6">; def MM7 : Register<"MM7">;
// Pseudo Floating Point registers
def FP0 : Register<"FP0">; def FP1 : Register<"FP1">;
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>;
def FR64 : RegisterClass<"X86", [f64], 64,
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>;
-// Vector floating point registers: V4F4, the 4 x f32 class, and V2F8,
-// the 2 x f64 class.
-def V4F4 : RegisterClass<"X86", [f32], 32,
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>;
-def V2F8 : RegisterClass<"X86", [f64], 64,
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>;
// FIXME: This sets up the floating point register files as though they are f64
// values, though they really are f80 values. This will cause us to spill
}
}];
}
+
+// Vector integer registers: V8I8, the 8 x i8 class, V4I16, the 4 x i16 class,
+// and V2I32, the 2 x i32 class.
+def V8I8 : RegisterClass<"X86", [v8i8], 64, [MM0, MM1, MM2, MM3, MM4, MM5,
+ MM6, MM7]>;
+def V4I16 : RegisterClass<"X86", [v4i16], 64, [MM0, MM1, MM2, MM3, MM4, MM5,
+ MM6, MM7]>;
+def V2I32 : RegisterClass<"X86", [v2i32], 64, [MM0, MM1, MM2, MM3, MM4, MM5,
+ MM6, MM7]>;
+
+// Vector floating point registers: V4F32, the 4 x f32 class, and V2F64,
+// the 2 x f64 class.
+def V4F32 : RegisterClass<"X86", [v4f32], 128,
+ [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>;
+def V2F64 : RegisterClass<"X86", [v2f64], 128,
+ [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>;
+
case MVT::Other: return "ch";
case MVT::Flag: return "flag";
case MVT::Vector:return "vec";
+ case MVT::v8i8: return "v8i8";
+ case MVT::v4i16: return "v4i16";
+ case MVT::v2i32: return "v2i32";
case MVT::v16i8: return "v16i8";
case MVT::v8i16: return "v8i16";
case MVT::v4i32: return "v4i32";
case MVT::f128: return "f128";
case MVT::Flag: return "Flag";
case MVT::isVoid:return "void";
+ case MVT::v8i8: return "v8i8";
+ case MVT::v4i16: return "v4i16";
+ case MVT::v2i32: return "v2i32";
case MVT::v16i8: return "v16i8";
case MVT::v8i16: return "v8i16";
case MVT::v4i32: return "v4i32";