ARM NEON implied destination aliases for VMAX/VMIN.
author: Jim Grosbach <grosbach@apple.com>
Mon, 19 Dec 2011 18:57:38 +0000 (18:57 +0000)
committer: Jim Grosbach <grosbach@apple.com>
Mon, 19 Dec 2011 18:57:38 +0000 (18:57 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146885 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/ARM/ARMInstrNEON.td
test/MC/ARM/neon-minmax-encoding.s
test/MC/ARM/neont2-minmax-encoding.s

index dcac1485d40f72744b6bcfacbf5bbaab05f94cad..cac8f45468926c078671554a0115c374d0a003d6 100644 (file)
@@ -5877,6 +5877,68 @@ def : NEONInstAlias<"vqdmulh${p}.s16 $Vdn, $Vm",
 def : NEONInstAlias<"vqdmulh${p}.s32 $Vdn, $Vm",
                     (VQDMULHv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
 
+// Two-operand variants for VMAX.
+def : NEONInstAlias<"vmax${p}.s8 $Vdn, $Vm",
+                    (VMAXsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s16 $Vdn, $Vm",
+                    (VMAXsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s32 $Vdn, $Vm",
+                    (VMAXsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u8 $Vdn, $Vm",
+                    (VMAXuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u16 $Vdn, $Vm",
+                    (VMAXuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u32 $Vdn, $Vm",
+                    (VMAXuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.f32 $Vdn, $Vm",
+                    (VMAXfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vmax${p}.s8 $Vdn, $Vm",
+                    (VMAXsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s16 $Vdn, $Vm",
+                    (VMAXsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s32 $Vdn, $Vm",
+                    (VMAXsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u8 $Vdn, $Vm",
+                    (VMAXuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u16 $Vdn, $Vm",
+                    (VMAXuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u32 $Vdn, $Vm",
+                    (VMAXuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.f32 $Vdn, $Vm",
+                    (VMAXfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// Two-operand variants for VMIN.
+def : NEONInstAlias<"vmin${p}.s8 $Vdn, $Vm",
+                    (VMINsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s16 $Vdn, $Vm",
+                    (VMINsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s32 $Vdn, $Vm",
+                    (VMINsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u8 $Vdn, $Vm",
+                    (VMINuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u16 $Vdn, $Vm",
+                    (VMINuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u32 $Vdn, $Vm",
+                    (VMINuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.f32 $Vdn, $Vm",
+                    (VMINfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vmin${p}.s8 $Vdn, $Vm",
+                    (VMINsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s16 $Vdn, $Vm",
+                    (VMINsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s32 $Vdn, $Vm",
+                    (VMINsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u8 $Vdn, $Vm",
+                    (VMINuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u16 $Vdn, $Vm",
+                    (VMINuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u32 $Vdn, $Vm",
+                    (VMINuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.f32 $Vdn, $Vm",
+                    (VMINfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
 // 'gas' compatibility aliases for quad-word instructions. Strictly speaking,
 // these should restrict to just the Q register variants, but the register
 // classes are enough to match correctly regardless, so we keep it simple
index 2d0d8c9b8aeb5f9fb86ca0d4c9779e6b553f8f3d..b1eb258b07574ab1df9e759a3658710d4a670637 100644 (file)
 @ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s
 
-@ CHECK: vmin.s8       d16, d16, d17           @ encoding: [0xb1,0x06,0x40,0xf2]
-       vmin.s8 d16, d16, d17
-@ CHECK: vmin.s16      d16, d16, d17   @ encoding: [0xb1,0x06,0x50,0xf2]
-       vmin.s16        d16, d16, d17
-@ CHECK: vmin.s32      d16, d16, d17   @ encoding: [0xb1,0x06,0x60,0xf2]
-       vmin.s32        d16, d16, d17
-@ CHECK: vmin.u8       d16, d16, d17           @ encoding: [0xb1,0x06,0x40,0xf3]
-       vmin.u8 d16, d16, d17
-@ CHECK: vmin.u16      d16, d16, d17   @ encoding: [0xb1,0x06,0x50,0xf3]
-       vmin.u16        d16, d16, d17
-@ CHECK: vmin.u32      d16, d16, d17   @ encoding: [0xb1,0x06,0x60,0xf3]
-       vmin.u32        d16, d16, d17
-@ CHECK: vmin.f32      d16, d16, d17   @ encoding: [0xa1,0x0f,0x60,0xf2]
-       vmin.f32        d16, d16, d17
-@ CHECK: vmin.s8       q8, q8, q9              @ encoding: [0xf2,0x06,0x40,0xf2]
-       vmin.s8 q8, q8, q9
-@ CHECK: vmin.s16      q8, q8, q9      @ encoding: [0xf2,0x06,0x50,0xf2]
-       vmin.s16        q8, q8, q9
-@ CHECK: vmin.s32      q8, q8, q9      @ encoding: [0xf2,0x06,0x60,0xf2]
-       vmin.s32        q8, q8, q9
-@ CHECK: vmin.u8       q8, q8, q9              @ encoding: [0xf2,0x06,0x40,0xf3]
-       vmin.u8 q8, q8, q9
-@ CHECK: vmin.u16      q8, q8, q9      @ encoding: [0xf2,0x06,0x50,0xf3]
-       vmin.u16        q8, q8, q9
-@ CHECK: vmin.u32      q8, q8, q9      @ encoding: [0xf2,0x06,0x60,0xf3]
-       vmin.u32        q8, q8, q9
-@ CHECK: vmin.f32      q8, q8, q9      @ encoding: [0xe2,0x0f,0x60,0xf2]
-       vmin.f32        q8, q8, q9
-@ CHECK: vmax.s8       d16, d16, d17           @ encoding: [0xa1,0x06,0x40,0xf2]
-       vmax.s8 d16, d16, d17
-@ CHECK: vmax.s16      d16, d16, d17   @ encoding: [0xa1,0x06,0x50,0xf2]
-       vmax.s16        d16, d16, d17
-@ CHECK: vmax.s32      d16, d16, d17   @ encoding: [0xa1,0x06,0x60,0xf2]
-       vmax.s32        d16, d16, d17
-@ CHECK: vmax.u8       d16, d16, d17           @ encoding: [0xa1,0x06,0x40,0xf3]
-       vmax.u8 d16, d16, d17
-@ CHECK: vmax.u16      d16, d16, d17   @ encoding: [0xa1,0x06,0x50,0xf3]
-       vmax.u16        d16, d16, d17
-@ CHECK: vmax.u32      d16, d16, d17   @ encoding: [0xa1,0x06,0x60,0xf3]
-       vmax.u32        d16, d16, d17
-@ CHECK: vmax.f32      d16, d16, d17   @ encoding: [0xa1,0x0f,0x40,0xf2]
-       vmax.f32        d16, d16, d17
-@ CHECK: vmax.s8       q8, q8, q9              @ encoding: [0xe2,0x06,0x40,0xf2]
-       vmax.s8 q8, q8, q9
-@ CHECK: vmax.s16      q8, q8, q9      @ encoding: [0xe2,0x06,0x50,0xf2]
-       vmax.s16        q8, q8, q9
+        vmax.s8 d1, d2, d3
+        vmax.s16 d4, d5, d6
+        vmax.s32 d7, d8, d9
+        vmax.u8 d10, d11, d12
+        vmax.u16 d13, d14, d15
+        vmax.u32 d16, d17, d18
+        vmax.f32 d19, d20, d21
+
+        vmax.s8 d2, d3
+        vmax.s16 d5, d6
+        vmax.s32 d8, d9
+        vmax.u8 d11, d12
+        vmax.u16 d14, d15
+        vmax.u32 d17, d18
+        vmax.f32 d20, d21
+
+        vmax.s8 q1, q2, q3
+        vmax.s16 q4, q5, q6
+        vmax.s32 q7, q8, q9
+        vmax.u8 q10, q11, q12
+        vmax.u16 q13, q14, q15
+        vmax.u32 q6, q7, q8
+        vmax.f32 q9, q5, q1
+
+        vmax.s8 q2, q3
+        vmax.s16 q5, q6
+        vmax.s32 q8, q9
+        vmax.u8 q11, q2
+        vmax.u16 q4, q5
+        vmax.u32 q7, q8
+        vmax.f32 q2, q1
+
+@ CHECK: vmax.s8       d1, d2, d3      @ encoding: [0x03,0x16,0x02,0xf2]
+@ CHECK: vmax.s16      d4, d5, d6      @ encoding: [0x06,0x46,0x15,0xf2]
+@ CHECK: vmax.s32      d7, d8, d9      @ encoding: [0x09,0x76,0x28,0xf2]
+@ CHECK: vmax.u8       d10, d11, d12   @ encoding: [0x0c,0xa6,0x0b,0xf3]
+@ CHECK: vmax.u16      d13, d14, d15   @ encoding: [0x0f,0xd6,0x1e,0xf3]
+@ CHECK: vmax.u32      d16, d17, d18   @ encoding: [0xa2,0x06,0x61,0xf3]
+@ CHECK: vmax.f32      d19, d20, d21   @ encoding: [0xa5,0x3f,0x44,0xf2]
+@ CHECK: vmax.s8       d2, d2, d3      @ encoding: [0x03,0x26,0x02,0xf2]
+@ CHECK: vmax.s16      d5, d5, d6      @ encoding: [0x06,0x56,0x15,0xf2]
+@ CHECK: vmax.s32      d8, d8, d9      @ encoding: [0x09,0x86,0x28,0xf2]
+@ CHECK: vmax.u8       d11, d11, d12   @ encoding: [0x0c,0xb6,0x0b,0xf3]
+@ CHECK: vmax.u16      d14, d14, d15   @ encoding: [0x0f,0xe6,0x1e,0xf3]
+@ CHECK: vmax.u32      d17, d17, d18   @ encoding: [0xa2,0x16,0x61,0xf3]
+@ CHECK: vmax.f32      d20, d20, d21   @ encoding: [0xa5,0x4f,0x44,0xf2]
+@ CHECK: vmax.s8       q1, q2, q3      @ encoding: [0x46,0x26,0x04,0xf2]
+@ CHECK: vmax.s16      q4, q5, q6      @ encoding: [0x4c,0x86,0x1a,0xf2]
+@ CHECK: vmax.s32      q7, q8, q9      @ encoding: [0xe2,0xe6,0x20,0xf2]
+@ CHECK: vmax.u8       q10, q11, q12   @ encoding: [0xe8,0x46,0x46,0xf3]
+@ CHECK: vmax.u16      q13, q14, q15   @ encoding: [0xee,0xa6,0x5c,0xf3]
+@ CHECK: vmax.u32      q6, q7, q8      @ encoding: [0x60,0xc6,0x2e,0xf3]
+@ CHECK: vmax.f32      q9, q5, q1      @ encoding: [0x42,0x2f,0x4a,0xf2]
+@ CHECK: vmax.s8       q2, q2, q3      @ encoding: [0x46,0x46,0x04,0xf2]
+@ CHECK: vmax.s16      q5, q5, q6      @ encoding: [0x4c,0xa6,0x1a,0xf2]
 @ CHECK: vmax.s32      q8, q8, q9      @ encoding: [0xe2,0x06,0x60,0xf2]
-       vmax.s32        q8, q8, q9
-@ CHECK: vmax.u8       q8, q8, q9              @ encoding: [0xe2,0x06,0x40,0xf3]
-       vmax.u8 q8, q8, q9
-@ CHECK: vmax.u16      q8, q8, q9      @ encoding: [0xe2,0x06,0x50,0xf3]
-       vmax.u16        q8, q8, q9
-@ CHECK: vmax.u32      q8, q8, q9      @ encoding: [0xe2,0x06,0x60,0xf3]
-       vmax.u32        q8, q8, q9
-@ CHECK: vmax.f32      q8, q8, q9      @ encoding: [0xe2,0x0f,0x40,0xf2]
-       vmax.f32        q8, q8, q9
+@ CHECK: vmax.u8       q11, q11, q2    @ encoding: [0xc4,0x66,0x46,0xf3]
+@ CHECK: vmax.u16      q4, q4, q5      @ encoding: [0x4a,0x86,0x18,0xf3]
+@ CHECK: vmax.u32      q7, q7, q8      @ encoding: [0x60,0xe6,0x2e,0xf3]
+@ CHECK: vmax.f32      q2, q2, q1      @ encoding: [0x42,0x4f,0x04,0xf2]
+
+
+        vmin.s8 d1, d2, d3
+        vmin.s16 d4, d5, d6
+        vmin.s32 d7, d8, d9
+        vmin.u8 d10, d11, d12
+        vmin.u16 d13, d14, d15
+        vmin.u32 d16, d17, d18
+        vmin.f32 d19, d20, d21
+
+        vmin.s8 d2, d3
+        vmin.s16 d5, d6
+        vmin.s32 d8, d9
+        vmin.u8 d11, d12
+        vmin.u16 d14, d15
+        vmin.u32 d17, d18
+        vmin.f32 d20, d21
+
+        vmin.s8 q1, q2, q3
+        vmin.s16 q4, q5, q6
+        vmin.s32 q7, q8, q9
+        vmin.u8 q10, q11, q12
+        vmin.u16 q13, q14, q15
+        vmin.u32 q6, q7, q8
+        vmin.f32 q9, q5, q1
+
+        vmin.s8 q2, q3
+        vmin.s16 q5, q6
+        vmin.s32 q8, q9
+        vmin.u8 q11, q2
+        vmin.u16 q4, q5
+        vmin.u32 q7, q8
+        vmin.f32 q2, q1
+
+@ CHECK: vmin.s8       d1, d2, d3      @ encoding: [0x13,0x16,0x02,0xf2]
+@ CHECK: vmin.s16      d4, d5, d6      @ encoding: [0x16,0x46,0x15,0xf2]
+@ CHECK: vmin.s32      d7, d8, d9      @ encoding: [0x19,0x76,0x28,0xf2]
+@ CHECK: vmin.u8       d10, d11, d12   @ encoding: [0x1c,0xa6,0x0b,0xf3]
+@ CHECK: vmin.u16      d13, d14, d15   @ encoding: [0x1f,0xd6,0x1e,0xf3]
+@ CHECK: vmin.u32      d16, d17, d18   @ encoding: [0xb2,0x06,0x61,0xf3]
+@ CHECK: vmin.f32      d19, d20, d21   @ encoding: [0xa5,0x3f,0x64,0xf2]
+@ CHECK: vmin.s8       d2, d2, d3      @ encoding: [0x13,0x26,0x02,0xf2]
+@ CHECK: vmin.s16      d5, d5, d6      @ encoding: [0x16,0x56,0x15,0xf2]
+@ CHECK: vmin.s32      d8, d8, d9      @ encoding: [0x19,0x86,0x28,0xf2]
+@ CHECK: vmin.u8       d11, d11, d12   @ encoding: [0x1c,0xb6,0x0b,0xf3]
+@ CHECK: vmin.u16      d14, d14, d15   @ encoding: [0x1f,0xe6,0x1e,0xf3]
+@ CHECK: vmin.u32      d17, d17, d18   @ encoding: [0xb2,0x16,0x61,0xf3]
+@ CHECK: vmin.f32      d20, d20, d21   @ encoding: [0xa5,0x4f,0x64,0xf2]
+@ CHECK: vmin.s8       q1, q2, q3      @ encoding: [0x56,0x26,0x04,0xf2]
+@ CHECK: vmin.s16      q4, q5, q6      @ encoding: [0x5c,0x86,0x1a,0xf2]
+@ CHECK: vmin.s32      q7, q8, q9      @ encoding: [0xf2,0xe6,0x20,0xf2]
+@ CHECK: vmin.u8       q10, q11, q12   @ encoding: [0xf8,0x46,0x46,0xf3]
+@ CHECK: vmin.u16      q13, q14, q15   @ encoding: [0xfe,0xa6,0x5c,0xf3]
+@ CHECK: vmin.u32      q6, q7, q8      @ encoding: [0x70,0xc6,0x2e,0xf3]
+@ CHECK: vmin.f32      q9, q5, q1      @ encoding: [0x42,0x2f,0x6a,0xf2]
+@ CHECK: vmin.s8       q2, q2, q3      @ encoding: [0x56,0x46,0x04,0xf2]
+@ CHECK: vmin.s16      q5, q5, q6      @ encoding: [0x5c,0xa6,0x1a,0xf2]
+@ CHECK: vmin.s32      q8, q8, q9      @ encoding: [0xf2,0x06,0x60,0xf2]
+@ CHECK: vmin.u8       q11, q11, q2    @ encoding: [0xd4,0x66,0x46,0xf3]
+@ CHECK: vmin.u16      q4, q4, q5      @ encoding: [0x5a,0x86,0x18,0xf3]
+@ CHECK: vmin.u32      q7, q7, q8      @ encoding: [0x70,0xe6,0x2e,0xf3]
+@ CHECK: vmin.f32      q2, q2, q1      @ encoding: [0x42,0x4f,0x24,0xf2]
index 7e86d45bb14acc47bd9a1a044338254aa7f2c76f..9ecadce8dce2783c7ff4fdfb5588b5808f18e649 100644 (file)
 
 .code 16
 
-@ CHECK: vmin.s8       d16, d16, d17           @ encoding: [0x40,0xef,0xb1,0x06]
-       vmin.s8 d16, d16, d17
-@ CHECK: vmin.s16      d16, d16, d17   @ encoding: [0x50,0xef,0xb1,0x06]
-       vmin.s16        d16, d16, d17
-@ CHECK: vmin.s32      d16, d16, d17   @ encoding: [0x60,0xef,0xb1,0x06]
-       vmin.s32        d16, d16, d17
-@ CHECK: vmin.u8       d16, d16, d17           @ encoding: [0x40,0xff,0xb1,0x06]
-       vmin.u8 d16, d16, d17
-@ CHECK: vmin.u16      d16, d16, d17   @ encoding: [0x50,0xff,0xb1,0x06]
-       vmin.u16        d16, d16, d17
-@ CHECK: vmin.u32      d16, d16, d17   @ encoding: [0x60,0xff,0xb1,0x06]
-       vmin.u32        d16, d16, d17
-@ CHECK: vmin.f32      d16, d16, d17   @ encoding: [0x60,0xef,0xa1,0x0f]
-       vmin.f32        d16, d16, d17
-@ CHECK: vmin.s8       q8, q8, q9              @ encoding: [0x40,0xef,0xf2,0x06]
-       vmin.s8 q8, q8, q9
-@ CHECK: vmin.s16      q8, q8, q9      @ encoding: [0x50,0xef,0xf2,0x06]
-       vmin.s16        q8, q8, q9
-@ CHECK: vmin.s32      q8, q8, q9      @ encoding: [0x60,0xef,0xf2,0x06]
-       vmin.s32        q8, q8, q9
-@ CHECK: vmin.u8       q8, q8, q9              @ encoding: [0x40,0xff,0xf2,0x06]
-       vmin.u8 q8, q8, q9
-@ CHECK: vmin.u16      q8, q8, q9      @ encoding: [0x50,0xff,0xf2,0x06]
-       vmin.u16        q8, q8, q9
-@ CHECK: vmin.u32      q8, q8, q9      @ encoding: [0x60,0xff,0xf2,0x06]
-       vmin.u32        q8, q8, q9
-@ CHECK: vmin.f32      q8, q8, q9      @ encoding: [0x60,0xef,0xe2,0x0f]
-       vmin.f32        q8, q8, q9
-@ CHECK: vmax.s8       d16, d16, d17           @ encoding: [0x40,0xef,0xa1,0x06]
-       vmax.s8 d16, d16, d17
-@ CHECK: vmax.s16      d16, d16, d17   @ encoding: [0x50,0xef,0xa1,0x06]
-       vmax.s16        d16, d16, d17
-@ CHECK: vmax.s32      d16, d16, d17   @ encoding: [0x60,0xef,0xa1,0x06]
-       vmax.s32        d16, d16, d17
-@ CHECK: vmax.u8       d16, d16, d17           @ encoding: [0x40,0xff,0xa1,0x06]
-       vmax.u8 d16, d16, d17
-@ CHECK: vmax.u16      d16, d16, d17   @ encoding: [0x50,0xff,0xa1,0x06]
-       vmax.u16        d16, d16, d17
-@ CHECK: vmax.u32      d16, d16, d17   @ encoding: [0x60,0xff,0xa1,0x06]
-       vmax.u32        d16, d16, d17
-@ CHECK: vmax.f32      d16, d16, d17   @ encoding: [0x40,0xef,0xa1,0x0f]
-       vmax.f32        d16, d16, d17
-@ CHECK: vmax.s8       q8, q8, q9              @ encoding: [0x40,0xef,0xe2,0x06]
-       vmax.s8 q8, q8, q9
-@ CHECK: vmax.s16      q8, q8, q9      @ encoding: [0x50,0xef,0xe2,0x06]
-       vmax.s16        q8, q8, q9
+        vmax.s8 d1, d2, d3
+        vmax.s16 d4, d5, d6
+        vmax.s32 d7, d8, d9
+        vmax.u8 d10, d11, d12
+        vmax.u16 d13, d14, d15
+        vmax.u32 d16, d17, d18
+        vmax.f32 d19, d20, d21
+
+        vmax.s8 d2, d3
+        vmax.s16 d5, d6
+        vmax.s32 d8, d9
+        vmax.u8 d11, d12
+        vmax.u16 d14, d15
+        vmax.u32 d17, d18
+        vmax.f32 d20, d21
+
+        vmax.s8 q1, q2, q3
+        vmax.s16 q4, q5, q6
+        vmax.s32 q7, q8, q9
+        vmax.u8 q10, q11, q12
+        vmax.u16 q13, q14, q15
+        vmax.u32 q6, q7, q8
+        vmax.f32 q9, q5, q1
+
+        vmax.s8 q2, q3
+        vmax.s16 q5, q6
+        vmax.s32 q8, q9
+        vmax.u8 q11, q2
+        vmax.u16 q4, q5
+        vmax.u32 q7, q8
+        vmax.f32 q2, q1
+
+@ CHECK: vmax.s8       d1, d2, d3      @ encoding: [0x02,0xef,0x03,0x16]
+@ CHECK: vmax.s16      d4, d5, d6      @ encoding: [0x15,0xef,0x06,0x46]
+@ CHECK: vmax.s32      d7, d8, d9      @ encoding: [0x28,0xef,0x09,0x76]
+@ CHECK: vmax.u8       d10, d11, d12   @ encoding: [0x0b,0xff,0x0c,0xa6]
+@ CHECK: vmax.u16      d13, d14, d15   @ encoding: [0x1e,0xff,0x0f,0xd6]
+@ CHECK: vmax.u32      d16, d17, d18   @ encoding: [0x61,0xff,0xa2,0x06]
+@ CHECK: vmax.f32      d19, d20, d21   @ encoding: [0x44,0xef,0xa5,0x3f]
+@ CHECK: vmax.s8       d2, d2, d3      @ encoding: [0x02,0xef,0x03,0x26]
+@ CHECK: vmax.s16      d5, d5, d6      @ encoding: [0x15,0xef,0x06,0x56]
+@ CHECK: vmax.s32      d8, d8, d9      @ encoding: [0x28,0xef,0x09,0x86]
+@ CHECK: vmax.u8       d11, d11, d12   @ encoding: [0x0b,0xff,0x0c,0xb6]
+@ CHECK: vmax.u16      d14, d14, d15   @ encoding: [0x1e,0xff,0x0f,0xe6]
+@ CHECK: vmax.u32      d17, d17, d18   @ encoding: [0x61,0xff,0xa2,0x16]
+@ CHECK: vmax.f32      d20, d20, d21   @ encoding: [0x44,0xef,0xa5,0x4f]
+@ CHECK: vmax.s8       q1, q2, q3      @ encoding: [0x04,0xef,0x46,0x26]
+@ CHECK: vmax.s16      q4, q5, q6      @ encoding: [0x1a,0xef,0x4c,0x86]
+@ CHECK: vmax.s32      q7, q8, q9      @ encoding: [0x20,0xef,0xe2,0xe6]
+@ CHECK: vmax.u8       q10, q11, q12   @ encoding: [0x46,0xff,0xe8,0x46]
+@ CHECK: vmax.u16      q13, q14, q15   @ encoding: [0x5c,0xff,0xee,0xa6]
+@ CHECK: vmax.u32      q6, q7, q8      @ encoding: [0x2e,0xff,0x60,0xc6]
+@ CHECK: vmax.f32      q9, q5, q1      @ encoding: [0x4a,0xef,0x42,0x2f]
+@ CHECK: vmax.s8       q2, q2, q3      @ encoding: [0x04,0xef,0x46,0x46]
+@ CHECK: vmax.s16      q5, q5, q6      @ encoding: [0x1a,0xef,0x4c,0xa6]
 @ CHECK: vmax.s32      q8, q8, q9      @ encoding: [0x60,0xef,0xe2,0x06]
-       vmax.s32        q8, q8, q9
-@ CHECK: vmax.u8       q8, q8, q9              @ encoding: [0x40,0xff,0xe2,0x06]
-       vmax.u8 q8, q8, q9
-@ CHECK: vmax.u16      q8, q8, q9      @ encoding: [0x50,0xff,0xe2,0x06]
-       vmax.u16        q8, q8, q9
-@ CHECK: vmax.u32      q8, q8, q9      @ encoding: [0x60,0xff,0xe2,0x06]
-       vmax.u32        q8, q8, q9
-@ CHECK: vmax.f32      q8, q8, q9      @ encoding: [0x40,0xef,0xe2,0x0f]
-       vmax.f32        q8, q8, q9
+@ CHECK: vmax.u8       q11, q11, q2    @ encoding: [0x46,0xff,0xc4,0x66]
+@ CHECK: vmax.u16      q4, q4, q5      @ encoding: [0x18,0xff,0x4a,0x86]
+@ CHECK: vmax.u32      q7, q7, q8      @ encoding: [0x2e,0xff,0x60,0xe6]
+@ CHECK: vmax.f32      q2, q2, q1      @ encoding: [0x04,0xef,0x42,0x4f]
+
+
+        vmin.s8 d1, d2, d3
+        vmin.s16 d4, d5, d6
+        vmin.s32 d7, d8, d9
+        vmin.u8 d10, d11, d12
+        vmin.u16 d13, d14, d15
+        vmin.u32 d16, d17, d18
+        vmin.f32 d19, d20, d21
+
+        vmin.s8 d2, d3
+        vmin.s16 d5, d6
+        vmin.s32 d8, d9
+        vmin.u8 d11, d12
+        vmin.u16 d14, d15
+        vmin.u32 d17, d18
+        vmin.f32 d20, d21
+
+        vmin.s8 q1, q2, q3
+        vmin.s16 q4, q5, q6
+        vmin.s32 q7, q8, q9
+        vmin.u8 q10, q11, q12
+        vmin.u16 q13, q14, q15
+        vmin.u32 q6, q7, q8
+        vmin.f32 q9, q5, q1
+
+        vmin.s8 q2, q3
+        vmin.s16 q5, q6
+        vmin.s32 q8, q9
+        vmin.u8 q11, q2
+        vmin.u16 q4, q5
+        vmin.u32 q7, q8
+        vmin.f32 q2, q1
+
+@ CHECK: vmin.s8       d1, d2, d3      @ encoding: [0x02,0xef,0x13,0x16]
+@ CHECK: vmin.s16      d4, d5, d6      @ encoding: [0x15,0xef,0x16,0x46]
+@ CHECK: vmin.s32      d7, d8, d9      @ encoding: [0x28,0xef,0x19,0x76]
+@ CHECK: vmin.u8       d10, d11, d12   @ encoding: [0x0b,0xff,0x1c,0xa6]
+@ CHECK: vmin.u16      d13, d14, d15   @ encoding: [0x1e,0xff,0x1f,0xd6]
+@ CHECK: vmin.u32      d16, d17, d18   @ encoding: [0x61,0xff,0xb2,0x06]
+@ CHECK: vmin.f32      d19, d20, d21   @ encoding: [0x64,0xef,0xa5,0x3f]
+@ CHECK: vmin.s8       d2, d2, d3      @ encoding: [0x02,0xef,0x13,0x26]
+@ CHECK: vmin.s16      d5, d5, d6      @ encoding: [0x15,0xef,0x16,0x56]
+@ CHECK: vmin.s32      d8, d8, d9      @ encoding: [0x28,0xef,0x19,0x86]
+@ CHECK: vmin.u8       d11, d11, d12   @ encoding: [0x0b,0xff,0x1c,0xb6]
+@ CHECK: vmin.u16      d14, d14, d15   @ encoding: [0x1e,0xff,0x1f,0xe6]
+@ CHECK: vmin.u32      d17, d17, d18   @ encoding: [0x61,0xff,0xb2,0x16]
+@ CHECK: vmin.f32      d20, d20, d21   @ encoding: [0x64,0xef,0xa5,0x4f]
+@ CHECK: vmin.s8       q1, q2, q3      @ encoding: [0x04,0xef,0x56,0x26]
+@ CHECK: vmin.s16      q4, q5, q6      @ encoding: [0x1a,0xef,0x5c,0x86]
+@ CHECK: vmin.s32      q7, q8, q9      @ encoding: [0x20,0xef,0xf2,0xe6]
+@ CHECK: vmin.u8       q10, q11, q12   @ encoding: [0x46,0xff,0xf8,0x46]
+@ CHECK: vmin.u16      q13, q14, q15   @ encoding: [0x5c,0xff,0xfe,0xa6]
+@ CHECK: vmin.u32      q6, q7, q8      @ encoding: [0x2e,0xff,0x70,0xc6]
+@ CHECK: vmin.f32      q9, q5, q1      @ encoding: [0x6a,0xef,0x42,0x2f]
+@ CHECK: vmin.s8       q2, q2, q3      @ encoding: [0x04,0xef,0x56,0x46]
+@ CHECK: vmin.s16      q5, q5, q6      @ encoding: [0x1a,0xef,0x5c,0xa6]
+@ CHECK: vmin.s32      q8, q8, q9      @ encoding: [0x60,0xef,0xf2,0x06]
+@ CHECK: vmin.u8       q11, q11, q2    @ encoding: [0x46,0xff,0xd4,0x66]
+@ CHECK: vmin.u16      q4, q4, q5      @ encoding: [0x18,0xff,0x5a,0x86]
+@ CHECK: vmin.u32      q7, q7, q8      @ encoding: [0x2e,0xff,0x70,0xe6]
+@ CHECK: vmin.f32      q2, q2, q1      @ encoding: [0x24,0xef,0x42,0x4f]