def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>;
+let neverHasSideEffects = 1 in
+def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR64:$src),
+                               "movd\t{$src, $dst|$dst, $src}", []>;
+
let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
"movq\t{$src, $dst|$dst, $src}", []>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
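+// 64-bit bit convert: MMX vector types to i64.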
+def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
+ (MMX_MOVD64from64rr VR64:$src)>;
+def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
+ (MMX_MOVD64from64rr VR64:$src)>;
+def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
+ (MMX_MOVD64from64rr VR64:$src)>;
+def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
+ (MMX_MOVD64from64rr VR64:$src)>;
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86-64 | grep movd | count 4
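+; Each function adds an MMX vector to itself and bitcasts the result to
+; i64, which should be selected to a single movd from the MMX register to
+; a GPR (one per function, hence the count of 4).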
+
+define i64 @foo(<1 x i64>* %p) {
+ %t = load <1 x i64>* %p
+ %u = add <1 x i64> %t, %t
+ %s = bitcast <1 x i64> %u to i64
+ ret i64 %s
+}
+define i64 @goo(<2 x i32>* %p) {
+ %t = load <2 x i32>* %p
+ %u = add <2 x i32> %t, %t
+ %s = bitcast <2 x i32> %u to i64
+ ret i64 %s
+}
+define i64 @hoo(<4 x i16>* %p) {
+ %t = load <4 x i16>* %p
+ %u = add <4 x i16> %t, %t
+ %s = bitcast <4 x i16> %u to i64
+ ret i64 %s
+}
+define i64 @ioo(<8 x i8>* %p) {
+ %t = load <8 x i8>* %p
+ %u = add <8 x i8> %t, %t
+ %s = bitcast <8 x i8> %u to i64
+ ret i64 %s
+}