// VMVN : Vector Bitwise NOT (Immediate)
let isReMaterializable = 1 in {
+// FIXME: This instruction's encoding MAY NOT BE correct.
def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$dst),
                         (ins nModImm:$SIMM), IIC_VMOVImm,
                         "vmvn", "i16", "$dst, $SIMM", "",
                         [(set DPR:$dst, (v4i16 (NEONvmvnImm timm:$SIMM)))]>;
+// FIXME: This instruction's encoding MAY NOT BE correct.
def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$dst),
                         (ins nModImm:$SIMM), IIC_VMOVImm,
                         "vmvn", "i16", "$dst, $SIMM", "",
                         [(set QPR:$dst, (v8i16 (NEONvmvnImm timm:$SIMM)))]>;
-
+// FIXME: This instruction's encoding MAY NOT BE correct.
def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$dst),
                         (ins nModImm:$SIMM), IIC_VMOVImm,
                         "vmvn", "i32", "$dst, $SIMM", "",
                         [(set DPR:$dst, (v2i32 (NEONvmvnImm timm:$SIMM)))]>;
+// FIXME: This instruction's encoding MAY NOT BE correct.
def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$dst),
                         (ins nModImm:$SIMM), IIC_VMOVImm,
                         "vmvn", "i32", "$dst, $SIMM", "",
                         [(set QPR:$dst, (v4i32 (NEONvmvnImm timm:$SIMM)))]>;
}
; FIXME: The following instructions still require testing:
; - vand with immediate
+; - vmvn of an immediate (a sketch of such a test is appended at the end of this file)
; CHECK: vorr_16xi8
define <16 x i8> @vorr_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 %tmp1 = load <16 x i8>* %A
 %tmp2 = load <16 x i8>* %B
; CHECK: vorr q8, q8, q9 @ encoding: [0xf2,0x01,0x60,0xf2]
 %tmp3 = or <16 x i8> %tmp1, %tmp2
 ret <16 x i8> %tmp3
-}
\ No newline at end of file
+}
+
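+; LLVM IR has no vector 'not' instruction, so the vbic and vorn tests below
+; materialize ~%tmp2 as an xor with an all-ones vector and rely on the ARM
+; backend folding that xor into the second operand of vbic (AND NOT) and
+; vorn (OR NOT). If the fold ever regressed, these would match a separate
+; vmvn plus a plain vand/vorr instead.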
+; CHECK: vbic_8xi8
+define <8 x i8> @vbic_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vbic d16, d17, d16 @ encoding: [0xb0,0x01,0x51,0xf2]
+ %tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp4 = and <8 x i8> %tmp1, %tmp3
+ ret <8 x i8> %tmp4
+}
+
+; CHECK: vbic_16xi8
+define <16 x i8> @vbic_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vbic q8, q8, q9 @ encoding: [0xf2,0x01,0x50,0xf2]
+ %tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp4 = and <16 x i8> %tmp1, %tmp3
+ ret <16 x i8> %tmp4
+}
+
+; CHECK: vorn_8xi8
+define <8 x i8> @vorn_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vorn d16, d17, d16 @ encoding: [0xb0,0x01,0x71,0xf2]
+ %tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp4 = or <8 x i8> %tmp1, %tmp3
+ ret <8 x i8> %tmp4
+}
+
+; CHECK: vorn_16xi8
+define <16 x i8> @vorn_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vorn q8, q8, q9 @ encoding: [0xf2,0x01,0x70,0xf2]
+ %tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp4 = or <16 x i8> %tmp1, %tmp3
+ ret <16 x i8> %tmp4
+}
+
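+; With no second operand to fold into, the bare xor-with-all-ones below
+; should select the register form of vmvn rather than vbic/vorn.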
+; CHECK: vmvn_8xi8
+define <8 x i8> @vmvn_8xi8(<8 x i8>* %A) nounwind {
+ %tmp1 = load <8 x i8>* %A
+; CHECK: vmvn d16, d16 @ encoding: [0xa0,0x05,0xf0,0xf3]
+ %tmp2 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ ret <8 x i8> %tmp2
+}
+
+; CHECK: vmvn_16xi8
+define <16 x i8> @vmvn_16xi8(<16 x i8>* %A) nounwind {
+ %tmp1 = load <16 x i8>* %A
+; CHECK: vmvn q8, q8 @ encoding: [0xe0,0x05,0xf0,0xf3]
+ %tmp2 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ ret <16 x i8> %tmp2
+}
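+
+; A minimal sketch of the still-missing vmvn-of-immediate coverage noted in
+; the FIXME at the top of this file; the function name is new here. The
+; constant is the i16 splat of ~0x10 (0xffef = -17), which instruction
+; selection should turn into a single vmvn.i16 via the NEONvmvnImm patterns.
+; The encoding bytes are deliberately left unchecked, since the .td FIXMEs
+; say the immediate-form encodings are themselves unverified.
+; CHECK: vmvn_4xi16_imm
+define <4 x i16> @vmvn_4xi16_imm() nounwind {
+; CHECK: vmvn.i16 d{{[0-9]+}}, #0x10
+ ret <4 x i16> <i16 -17, i16 -17, i16 -17, i16 -17>
+}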