1 //===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
15 SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
17 SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
19 SDTypeProfile<0, 1, [SDTCisFP<0>]>;
21 SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
// ARM-specific SelectionDAG nodes used by the VFP patterns below:
// FP->int / int->FP conversion, FP compare (plain and compare-with-zero),
// the FMSTAT flag transfer, and building an f64 DPR from a GPR pair.
24 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
25 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
26 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
27 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// arm_fmstat consumes and produces flag glue; the compares only produce it
// (the FMSTAT instruction later in this file copies FPSCR flags to CPSR).
28 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
29 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
30 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
31 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
33 //===----------------------------------------------------------------------===//
34 // Operand Definitions.
// FP immediate operands that match only constants representable in the
// VFP3 8-bit encoded-immediate form: ARM::getVFPf{32,64}Imm return -1 for
// unencodable values, so the PatLeaf predicate rejects them.
// NOTE(review): the "}]> {" predicate terminators and the closing "}" of
// each def are not visible in this excerpt -- confirm against the full file.
38 def vfp_f32imm : Operand<f32>,
39 PatLeaf<(f32 fpimm), [{
40 return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
42 let PrintMethod = "printVFPf32ImmOperand";
45 def vfp_f64imm : Operand<f64>,
46 PatLeaf<(f64 fpimm), [{
47 return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
49 let PrintMethod = "printVFPf64ImmOperand";
53 //===----------------------------------------------------------------------===//
54 // Load / store Instructions.
// Single-register VFP load/store (vldr/vstr) with addrmode5 addressing.
// canFoldAsLoad/isReMaterializable allow the register allocator to fold or
// rematerialize the loads instead of spilling.
// NOTE(review): the "}" closing this `let` block is not visible here.
57 let canFoldAsLoad = 1, isReMaterializable = 1 in {
58 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
59 IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
60 [(set DPR:$dst, (f64 (load addrmode5:$addr)))]>;
62 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
63 IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
64 [(set SPR:$dst, (load addrmode5:$addr))]>;
// Stores of a single D or S register.
67 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
68 IIC_fpStore64, "vstr", ".64\t$src, $addr",
69 [(store (f64 DPR:$src), addrmode5:$addr)]>;
71 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
72 IIC_fpStore32, "vstr", ".32\t$src, $addr",
73 [(store SPR:$src, addrmode5:$addr)]>;
75 //===----------------------------------------------------------------------===//
76 // Load / store multiple Instructions.
// Load-multiple (vldm) for D and S register lists: plain and base-writeback
// (_UPD, "!" syntax, "$addr.base = $wb" tie) variants. The reglist is
// variable_ops, so mayLoad/hasExtraDefRegAllocReq inform the allocator that
// extra registers are defined beyond the declared outs.
// NOTE(review): the per-def closing braces (and any `let Inst{...}` encoding
// lines inside them) are not visible in this excerpt.
79 let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
80 def VLDMD : AXDI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$dsts,
81 variable_ops), IndexModeNone, IIC_fpLoadm,
82 "vldm${addr:submode}${p}\t${addr:base}, $dsts", "", []> {
86 def VLDMS : AXSI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$dsts,
87 variable_ops), IndexModeNone, IIC_fpLoadm,
88 "vldm${addr:submode}${p}\t${addr:base}, $dsts", "", []> {
92 def VLDMD_UPD : AXDI5<(outs GPR:$wb), (ins addrmode5:$addr, pred:$p,
93 reglist:$dsts, variable_ops),
94 IndexModeUpd, IIC_fpLoadm,
95 "vldm${addr:submode}${p}\t${addr:base}!, $dsts",
96 "$addr.base = $wb", []> {
100 def VLDMS_UPD : AXSI5<(outs GPR:$wb), (ins addrmode5:$addr, pred:$p,
101 reglist:$dsts, variable_ops),
102 IndexModeUpd, IIC_fpLoadm,
103 "vldm${addr:submode}${p}\t${addr:base}!, $dsts",
104 "$addr.base = $wb", []> {
107 } // mayLoad, hasExtraDefRegAllocReq
// Store-multiple (vstm), mirroring the vldm defs above: plain and
// base-writeback (_UPD) variants for D and S register lists.
// hasExtraSrcRegAllocReq marks the variable_ops reglist as extra sources.
// NOTE(review): the per-def closing braces (and any `let Inst{...}` encoding
// lines inside them) are not visible in this excerpt.
109 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in {
110 def VSTMD : AXDI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$srcs,
111 variable_ops), IndexModeNone, IIC_fpStorem,
112 "vstm${addr:submode}${p}\t${addr:base}, $srcs", "", []> {
116 def VSTMS : AXSI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$srcs,
117 variable_ops), IndexModeNone, IIC_fpStorem,
118 "vstm${addr:submode}${p}\t${addr:base}, $srcs", "", []> {
122 def VSTMD_UPD : AXDI5<(outs GPR:$wb), (ins addrmode5:$addr, pred:$p,
123 reglist:$srcs, variable_ops),
124 IndexModeUpd, IIC_fpStorem,
125 "vstm${addr:submode}${p}\t${addr:base}!, $srcs",
126 "$addr.base = $wb", []> {
130 def VSTMS_UPD : AXSI5<(outs GPR:$wb), (ins addrmode5:$addr, pred:$p,
131 reglist:$srcs, variable_ops),
132 IndexModeUpd, IIC_fpStorem,
133 "vstm${addr:submode}${p}\t${addr:base}!, $srcs",
134 "$addr.base = $wb", []> {
137 } // mayStore, hasExtraSrcRegAllocReq
139 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
141 //===----------------------------------------------------------------------===//
142 // FP Binary Operations.
// FP binary arithmetic: vadd, vcmp(e), vdiv, vmul, vnmul, vsub in both
// .f64 (DPR) and .f32 (SPR) forms, selected from the generic fadd/fdiv/
// fmul/fsub DAG nodes.
145 def VADDD : ADbI<0b11100, 0b11, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
146 IIC_fpALU64, "vadd", ".f64\t$dst, $a, $b",
147 [(set DPR:$dst, (fadd DPR:$a, (f64 DPR:$b)))]>;
149 def VADDS : ASbIn<0b11100, 0b11, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
150 IIC_fpALU32, "vadd", ".f32\t$dst, $a, $b",
151 [(set SPR:$dst, (fadd SPR:$a, SPR:$b))]>;
153 // These are encoded as unary instructions.
// Compares write the FPSCR condition flags (Defs = [FPSCR]). Only the
// vcmpe forms carry selection patterns; the plain vcmp forms are listed
// for disassembly only.
// NOTE(review): presumably vcmpe differs from vcmp by signaling on quiet
// NaNs -- confirm against the ARM ARM. The "}" closing this `let Defs`
// block is not visible in this excerpt.
154 let Defs = [FPSCR] in {
155 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins DPR:$a, DPR:$b),
156 IIC_fpCMP64, "vcmpe", ".f64\t$a, $b",
157 [(arm_cmpfp DPR:$a, (f64 DPR:$b))]>;
159 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$a, DPR:$b),
160 IIC_fpCMP64, "vcmp", ".f64\t$a, $b",
161 [/* For disassembly only; pattern left blank */]>;
163 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins SPR:$a, SPR:$b),
164 IIC_fpCMP32, "vcmpe", ".f32\t$a, $b",
165 [(arm_cmpfp SPR:$a, SPR:$b)]>;
167 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$a, SPR:$b),
168 IIC_fpCMP32, "vcmp", ".f32\t$a, $b",
169 [/* For disassembly only; pattern left blank */]>;
172 def VDIVD : ADbI<0b11101, 0b00, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
173 IIC_fpDIV64, "vdiv", ".f64\t$dst, $a, $b",
174 [(set DPR:$dst, (fdiv DPR:$a, (f64 DPR:$b)))]>;
176 def VDIVS : ASbI<0b11101, 0b00, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
177 IIC_fpDIV32, "vdiv", ".f32\t$dst, $a, $b",
178 [(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
180 def VMULD : ADbI<0b11100, 0b10, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
181 IIC_fpMUL64, "vmul", ".f64\t$dst, $a, $b",
182 [(set DPR:$dst, (fmul DPR:$a, (f64 DPR:$b)))]>;
184 def VMULS : ASbIn<0b11100, 0b10, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
185 IIC_fpMUL32, "vmul", ".f32\t$dst, $a, $b",
186 [(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
// vnmul computes -(a * b) in a single instruction.
188 def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
189 IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
190 [(set DPR:$dst, (fneg (fmul DPR:$a, (f64 DPR:$b))))]>;
192 def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
193 IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
194 [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
196 // Match reassociated forms only if not sign dependent rounding.
// (-a) * b == -(a * b) only when sign-dependent rounding is not honored.
197 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
198 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
199 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
200 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
203 def VSUBD : ADbI<0b11100, 0b11, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
204 IIC_fpALU64, "vsub", ".f64\t$dst, $a, $b",
205 [(set DPR:$dst, (fsub DPR:$a, (f64 DPR:$b)))]>;
207 def VSUBS : ASbIn<0b11100, 0b11, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
208 IIC_fpALU32, "vsub", ".f32\t$dst, $a, $b",
209 [(set SPR:$dst, (fsub SPR:$a, SPR:$b))]>;
211 //===----------------------------------------------------------------------===//
212 // FP Unary Operations.
// FP unary operations: vabs, compare-with-zero, precision conversions,
// register-to-register vmov, vneg, and vsqrt.
215 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
216 IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
217 [(set DPR:$dst, (fabs (f64 DPR:$a)))]>;
219 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
220 IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
221 [(set SPR:$dst, (fabs SPR:$a))]>;
// Compare against #0; writes FPSCR flags. Plain (non-'e') forms are
// disassembly-only. NOTE(review): the "}" closing this `let Defs` block is
// not visible in this excerpt.
223 let Defs = [FPSCR] in {
224 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
225 IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
226 [(arm_cmpfp0 (f64 DPR:$a))]>;
228 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$a),
229 IIC_fpCMP64, "vcmp", ".f64\t$a, #0",
230 [/* For disassembly only; pattern left blank */]>;
232 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
233 IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
234 [(arm_cmpfp0 SPR:$a)]>;
236 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$a),
237 IIC_fpCMP32, "vcmp", ".f32\t$a, #0",
238 [/* For disassembly only; pattern left blank */]>;
// f32 -> f64 widening conversion (fextend).
241 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
242 IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
243 [(set DPR:$dst, (fextend SPR:$a))]>;
245 // Special case encoding: bits 11-8 is 0b1011.
// f64 -> f32 narrowing conversion (fround); encoding spelled out by hand.
// NOTE(review): the "}" closing this def is not visible in this excerpt.
246 def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
247 IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
248 [(set SPR:$dst, (fround DPR:$a))]> {
249 let Inst{27-23} = 0b11101;
250 let Inst{21-16} = 0b110111;
251 let Inst{11-8} = 0b1011;
252 let Inst{7-6} = 0b11;
256 // Between half-precision and single-precision. For disassembly only.
// vcvtb/vcvtt operate on the bottom/top half of the S register. Despite the
// "disassembly only" note, the ARMPats below do select VCVTBSH/VCVTBHS for
// the f32_to_f16 / f16_to_f32 DAG nodes.
258 def VCVTBSH : ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
259 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
260 [/* For disassembly only; pattern left blank */]>;
262 def : ARMPat<(f32_to_f16 SPR:$a),
263 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
265 def VCVTBHS : ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
266 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
267 [/* For disassembly only; pattern left blank */]>;
269 def : ARMPat<(f16_to_f32 GPR:$a),
270 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
272 def VCVTTSH : ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
273 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
274 [/* For disassembly only; pattern left blank */]>;
276 def VCVTTHS : ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
277 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
278 [/* For disassembly only; pattern left blank */]>;
// Plain register moves carry no pattern and no side effects; they are
// emitted by copy lowering, not by selection.
280 let neverHasSideEffects = 1 in {
281 def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
282 IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
284 def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
285 IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
286 } // neverHasSideEffects
288 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
289 IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
290 [(set DPR:$dst, (fneg (f64 DPR:$a)))]>;
292 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
293 IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
294 [(set SPR:$dst, (fneg SPR:$a))]>;
296 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
297 IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
298 [(set DPR:$dst, (fsqrt (f64 DPR:$a)))]>;
300 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
301 IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
302 [(set SPR:$dst, (fsqrt SPR:$a))]>;
304 //===----------------------------------------------------------------------===//
305 // FP <-> GPR Copies. Int <-> FP Conversions.
// Bit-pattern moves between the GPR and VFP register files.
// VMOVRS/VMOVSR are single-word bitconverts; the RR forms move 64 bits as a
// GPR pair to/from a DPR (or an SPR pair).
308 def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
309 IIC_VMOVSI, "vmov", "\t$dst, $src",
310 [(set GPR:$dst, (bitconvert SPR:$src))]>;
312 def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
313 IIC_VMOVIS, "vmov", "\t$dst, $src",
314 [(set SPR:$dst, (bitconvert GPR:$src))]>;
// DPR -> two GPRs. No pattern: TableGen cannot express a multi-result
// instruction pattern here. NOTE(review): the closing braces of these defs
// are not visible in this excerpt.
316 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
317 (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
318 IIC_VMOVDI, "vmov", "\t$wb, $dst2, $src",
319 [/* FIXME: Can't write pattern for multiple result instr*/]> {
320 let Inst{7-6} = 0b00;
323 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
324 (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
325 IIC_VMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
326 [/* For disassembly only; pattern left blank */]> {
327 let Inst{7-6} = 0b00;
// Two GPRs -> DPR, selected from the arm_fmdrr (ARMISD::VMOVDRR) node.
333 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
334 (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
335 IIC_VMOVID, "vmov", "\t$dst, $src1, $src2",
336 [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
337 let Inst{7-6} = 0b00;
340 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
341 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
342 IIC_VMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
343 [/* For disassembly only; pattern left blank */]> {
344 let Inst{7-6} = 0b00;
350 // FMRX : SPR system reg -> GPR
354 // FMXR: GPR -> VFP System reg
// Int -> FP conversions. The i32 source arrives as a bit pattern in an SPR
// (note the SPR:$a input); Inst{7} selects signed (1) vs unsigned (0).
// NOTE(review): the closing braces of these defs are not visible in this
// excerpt.
359 def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
360 (outs DPR:$dst), (ins SPR:$a),
361 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
362 [(set DPR:$dst, (f64 (arm_sitof SPR:$a)))]> {
363 let Inst{7} = 1; // s32
366 def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
367 (outs SPR:$dst),(ins SPR:$a),
368 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
369 [(set SPR:$dst, (arm_sitof SPR:$a))]> {
370 let Inst{7} = 1; // s32
373 def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
374 (outs DPR:$dst), (ins SPR:$a),
375 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
376 [(set DPR:$dst, (f64 (arm_uitof SPR:$a)))]> {
377 let Inst{7} = 0; // u32
380 def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
381 (outs SPR:$dst), (ins SPR:$a),
382 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
383 [(set SPR:$dst, (arm_uitof SPR:$a))]> {
384 let Inst{7} = 0; // u32
388 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
// FP -> int conversions. The Z-bit=1 ("...Z...") variants truncate toward
// zero and carry selection patterns; the i32 result is left as a bit
// pattern in an SPR. NOTE(review): the closing braces of these defs are not
// visible in this excerpt.
390 def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
391 (outs SPR:$dst), (ins DPR:$a),
392 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
393 [(set SPR:$dst, (arm_ftosi (f64 DPR:$a)))]> {
394 let Inst{7} = 1; // Z bit
397 def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
398 (outs SPR:$dst), (ins SPR:$a),
399 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
400 [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
401 let Inst{7} = 1; // Z bit
404 def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
405 (outs SPR:$dst), (ins DPR:$a),
406 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
407 [(set SPR:$dst, (arm_ftoui (f64 DPR:$a)))]> {
408 let Inst{7} = 1; // Z bit
411 def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
412 (outs SPR:$dst), (ins SPR:$a),
413 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
414 [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
415 let Inst{7} = 1; // Z bit
418 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
419 // For disassembly only.
// vcvtr: rounds per the FPSCR rounding mode; no selection patterns.
421 def VTOSIRD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
422 (outs SPR:$dst), (ins DPR:$a),
423 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$dst, $a",
424 [/* For disassembly only; pattern left blank */]> {
425 let Inst{7} = 0; // Z bit
428 def VTOSIRS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
429 (outs SPR:$dst), (ins SPR:$a),
430 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$dst, $a",
431 [/* For disassembly only; pattern left blank */]> {
432 let Inst{7} = 0; // Z bit
435 def VTOUIRD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
436 (outs SPR:$dst), (ins DPR:$a),
437 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$dst, $a",
438 [/* For disassembly only; pattern left blank */]> {
439 let Inst{7} = 0; // Z bit
442 def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
443 (outs SPR:$dst), (ins SPR:$a),
444 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$dst, $a",
445 [/* For disassembly only; pattern left blank */]> {
446 let Inst{7} = 0; // Z bit
449 // Convert between floating-point and fixed-point
450 // Data type for fixed-point naming convention:
451 //   S16 (U=0, sx=0) -> SH
452 //   U16 (U=1, sx=0) -> UH
453 //   S32 (U=0, sx=1) -> SL
454 //   U32 (U=1, sx=1) -> UL
// All fixed-point conversions are in-place: the source register is also the
// destination ($a tied to $dst). All are disassembly-only (no patterns);
// $fbits is the number of fraction bits.
456 let Constraints = "$a = $dst" in {
458 // FP to Fixed-Point:
460 def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
461 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
462 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
463 [/* For disassembly only; pattern left blank */]>;
465 def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
466 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
467 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
468 [/* For disassembly only; pattern left blank */]>;
470 def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
471 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
472 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
473 [/* For disassembly only; pattern left blank */]>;
475 def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
476 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
477 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
478 [/* For disassembly only; pattern left blank */]>;
480 def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
481 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
482 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
483 [/* For disassembly only; pattern left blank */]>;
485 def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
486 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
487 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
488 [/* For disassembly only; pattern left blank */]>;
490 def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
491 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
492 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
493 [/* For disassembly only; pattern left blank */]>;
495 def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
496 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
497 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
498 [/* For disassembly only; pattern left blank */]>;
500 // Fixed-Point to FP:
// Inverse direction of the block above; same tied-operand ($a = $dst)
// constraint, same S{H,L}/U{H,L} naming, disassembly-only.
502 def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
503 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
504 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
505 [/* For disassembly only; pattern left blank */]>;
507 def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
508 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
509 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
510 [/* For disassembly only; pattern left blank */]>;
512 def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
513 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
514 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
515 [/* For disassembly only; pattern left blank */]>;
517 def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
518 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
519 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
520 [/* For disassembly only; pattern left blank */]>;
522 def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
523 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
524 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
525 [/* For disassembly only; pattern left blank */]>;
527 def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
528 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
529 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
530 [/* For disassembly only; pattern left blank */]>;
532 def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
533 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
534 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
535 [/* For disassembly only; pattern left blank */]>;
537 def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
538 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
539 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
540 [/* For disassembly only; pattern left blank */]>;
542 } // End of 'let Constraints = "$a = $dst" in'
544 //===----------------------------------------------------------------------===//
545 // FP FMA Operations.
// Multiply-accumulate family. Each def ties the accumulator input to the
// destination (RegConstraint "$dstin = $dst"). Patterns:
//   vmla:  dst =  (a * b) + dstin
//   vnmls: dst =  (a * b) - dstin
//   vmls:  dst = -(a * b) + dstin
//   vnmla: dst = -(a * b) - dstin
548 def VMLAD : ADbI_vmlX<0b11100, 0b00, 0, 0,
549 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
550 IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
551 [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b),
552 (f64 DPR:$dstin)))]>,
553 RegConstraint<"$dstin = $dst">;
555 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
556 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
557 IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
558 [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
559 RegConstraint<"$dstin = $dst">;
561 def VNMLSD : ADbI_vmlX<0b11100, 0b01, 0, 0,
562 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
563 IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
564 [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b),
565 (f64 DPR:$dstin)))]>,
566 RegConstraint<"$dstin = $dst">;
568 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
569 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
570 IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
571 [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
572 RegConstraint<"$dstin = $dst">;
574 def VMLSD : ADbI_vmlX<0b11100, 0b00, 1, 0,
575 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
576 IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
577 [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)),
578 (f64 DPR:$dstin)))]>,
579 RegConstraint<"$dstin = $dst">;
581 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
582 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
583 IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
584 [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
585 RegConstraint<"$dstin = $dst">;
// dstin - (a * b) also maps onto vmls, but only when NEON is not preferred
// for scalar FP.
587 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
588 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
589 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
590 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
592 def VNMLAD : ADbI_vmlX<0b11100, 0b01, 1, 0,
593 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
594 IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
595 [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)),
596 (f64 DPR:$dstin)))]>,
597 RegConstraint<"$dstin = $dst">;
599 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
600 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
601 IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
602 [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
603 RegConstraint<"$dstin = $dst">;
605 //===----------------------------------------------------------------------===//
606 // FP Conditional moves.
// Predicated (conditional) move/negate pseudo-patterns: the false value is
// tied to the destination, so the instruction only overwrites it when the
// predicate passes. Patterns are commented out (selected elsewhere).
609 def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
610 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
611 IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
612 [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
613 RegConstraint<"$false = $dst">;
615 def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
616 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
617 IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
618 [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
619 RegConstraint<"$false = $dst">;
621 def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
622 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
623 IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
624 [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
625 RegConstraint<"$false = $dst">;
627 def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
628 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
629 IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
630 [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
631 RegConstraint<"$false = $dst">;
634 //===----------------------------------------------------------------------===//
638 // APSR is the application level alias of CPSR. This moves the FPSCR N, Z, C, V flags
// FMSTAT ("vmrs apsr_nzcv, fpscr"): copies the FPSCR N,Z,C,V flags into
// CPSR so integer conditional instructions can test an FP compare result
// (hence Defs = [CPSR], Uses = [FPSCR]).
// NOTE(review): the pattern line (presumably "[(arm_fmstat)]> {") and the
// closing brace(s) of this def are not visible in this excerpt.
640 let Defs = [CPSR], Uses = [FPSCR] in
641 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
642 "\tapsr_nzcv, fpscr",
644 let Inst{27-20} = 0b11101111;
645 let Inst{19-16} = 0b0001;
646 let Inst{15-12} = 0b1111;
647 let Inst{11-8} = 0b1010;
652 // FPSCR <-> GPR (for disassembly only)
// General FPSCR <-> GPR transfers (vmrs reads FPSCR, vmsr writes it);
// both are listed for disassembly only.
// NOTE(review): the assembly-operand string lines (e.g. "\t$dst, fpscr")
// and the closing braces of these defs are not visible in this excerpt.
654 let Uses = [FPSCR] in {
655 def VMRS : VFPAI<(outs GPR:$dst), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
657 [/* For disassembly only; pattern left blank */]> {
658 let Inst{27-20} = 0b11101111;
659 let Inst{19-16} = 0b0001;
660 let Inst{11-8} = 0b1010;
666 let Defs = [FPSCR] in {
667 def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT, "vmsr",
669 [/* For disassembly only; pattern left blank */]> {
670 let Inst{27-20} = 0b11101110;
671 let Inst{19-16} = 0b0001;
672 let Inst{11-8} = 0b1010;
678 // Materialize FP immediates. VFP3 only.
// FCONSTD moves a VFP3-encodable f64 immediate (see vfp_f64imm above)
// directly into a DPR; rematerializable, so it can be recomputed instead of
// spilled. NOTE(review): the immediate-encoding Inst bits and the closing
// brace of this def are not visible in this excerpt.
679 let isReMaterializable = 1 in {
680 def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
681 VFPMiscFrm, IIC_fpUNA64,
682 "vmov", ".f64\t$dst, $imm",
683 [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
684 let Inst{27-23} = 0b11101;
685 let Inst{21-20} = 0b11;
686 let Inst{11-9} = 0b101;
688 let Inst{7-4} = 0b0000;
691 def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
692 VFPMiscFrm, IIC_fpUNA32,
693 "vmov", ".f32\t$dst, $imm",
694 [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
695 let Inst{27-23} = 0b11101;
696 let Inst{21-20} = 0b11;
697 let Inst{11-9} = 0b101;
699 let Inst{7-4} = 0b0000;