; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
; Negate 8 x i8: sub-from-zero selects to NEON vneg.s8 (d-register form).
define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
  ret <8 x i8> %tmp2
}
; Negate 4 x i16: sub-from-zero selects to NEON vneg.s16 (d-register form).
define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vneg.s16 d16, d16 @ encoding: [0xa0,0x03,0xf5,0xf3]
  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
  ret <4 x i16> %tmp2
}
; Negate 2 x i32: sub-from-zero selects to NEON vneg.s32 (d-register form).
define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vneg.s32 d16, d16 @ encoding: [0xa0,0x03,0xf9,0xf3]
  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
  ret <2 x i32> %tmp2
}
; Negate 2 x float: fsub from -0.0 selects to NEON vneg.f32 (d-register form).
define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
  %tmp1 = load <2 x float>* %A
; CHECK: vneg.f32 d16, d16 @ encoding: [0xa0,0x07,0xf9,0xf3]
  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
  ret <2 x float> %tmp2
}
; Negate 16 x i8: sub-from-zero selects to NEON vneg.s8 (q-register form).
define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
  ret <16 x i8> %tmp2
}
; Negate 8 x i16: sub-from-zero selects to NEON vneg.s16 (q-register form).
define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vneg.s16 q8, q8 @ encoding: [0xe0,0x03,0xf5,0xf3]
  %tmp2 = sub <8 x i16> zeroinitializer, %tmp1
  ret <8 x i16> %tmp2
}
; Negate 4 x i32: sub-from-zero selects to NEON vneg.s32 (q-register form).
define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vneg.s32 q8, q8 @ encoding: [0xe0,0x03,0xf9,0xf3]
  %tmp2 = sub <4 x i32> zeroinitializer, %tmp1
  ret <4 x i32> %tmp2
}
; Negate 4 x float: fsub from -0.0 selects to NEON vneg.f32 (q-register form).
define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
  %tmp1 = load <4 x float>* %A
; CHECK: vneg.f32 q8, q8 @ encoding: [0xe0,0x07,0xf9,0xf3]
  %tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
  ret <4 x float> %tmp2
}
; Saturating negate 8 x i8 via the vqneg intrinsic (d-register form).
define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
  %tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
  ret <8 x i8> %tmp2
}
; Saturating negate 4 x i16 via the vqneg intrinsic (d-register form).
define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vqneg.s16 d16, d16 @ encoding: [0xa0,0x07,0xf4,0xf3]
  %tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
  ret <4 x i16> %tmp2
}
; Saturating negate 2 x i32 via the vqneg intrinsic (d-register form).
define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vqneg.s32 d16, d16 @ encoding: [0xa0,0x07,0xf8,0xf3]
  %tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
  ret <2 x i32> %tmp2
}
; Saturating negate 16 x i8 via the vqneg intrinsic (q-register form).
define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
  %tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
  ret <16 x i8> %tmp2
}
; Saturating negate 8 x i16 via the vqneg intrinsic (q-register form).
define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vqneg.s16 q8, q8 @ encoding: [0xe0,0x07,0xf4,0xf3]
  %tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
  ret <8 x i16> %tmp2
}
; Saturating negate 4 x i32 via the vqneg intrinsic (q-register form).
define <4 x i32> @vqnegQs32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vqneg.s32 q8, q8 @ encoding: [0xe0,0x07,0xf8,0xf3]
  %tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
  ret <4 x i32> %tmp2
}
; Declarations for the NEON saturating-negate intrinsics used above.
declare <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) nounwind readnone