1 //===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
15 SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
17 SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
19 SDTypeProfile<0, 1, [SDTCisFP<0>]>;
21 SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
// VFP-specific SelectionDAG nodes.  Each def binds a TableGen node name to
// the corresponding ARMISD enumerator produced during ISel lowering.

// FP -> integer conversions; per SDT_FTOI the integer result is carried in
// an f32-typed (S) register.
24 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
25 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
// Integer -> FP conversions; per SDT_ITOF the source is an f32-typed register.
26 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
27 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// Copies the FP status flags to the CPU flags; consumes and produces glue.
28 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
// FP compares; both produce a flag (glue) result read by arm_fmstat.
// arm_cmpfp0 compares a single FP operand against zero (CMPFPw0).
29 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
30 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
// Builds an f64 value from two i32 halves (see SDT_VMOVDRR above).
31 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
33 //===----------------------------------------------------------------------===//
34 // Operand Definitions.
38 def vfp_f32imm : Operand<f32>,
39 PatLeaf<(f32 fpimm), [{
40 return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
42 let PrintMethod = "printVFPf32ImmOperand";
45 def vfp_f64imm : Operand<f64>,
46 PatLeaf<(f64 fpimm), [{
47 return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
49 let PrintMethod = "printVFPf64ImmOperand";
53 //===----------------------------------------------------------------------===//
54 // Load / store Instructions.
57 let canFoldAsLoad = 1, isReMaterializable = 1 in {
// VLDR: single FP load.  D variant loads a 64-bit value into a D register,
// S variant a 32-bit value into an S register, both via addrmode5.
// (Enclosing 'let' marks these canFoldAsLoad and isReMaterializable.)
58 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
59 IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
60 [(set DPR:$dst, (f64 (load addrmode5:$addr)))]>;
62 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
63 IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
64 [(set SPR:$dst, (load addrmode5:$addr))]>;
// VSTR: single FP store.  D variant stores a 64-bit D register,
// S variant a 32-bit S register, both via addrmode5.
67 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
68 IIC_fpStore64, "vstr", ".64\t$src, $addr",
69 [(store (f64 DPR:$src), addrmode5:$addr)]>;
71 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
72 IIC_fpStore32, "vstr", ".32\t$src, $addr",
73 [(store SPR:$src, addrmode5:$addr)]>;
75 //===----------------------------------------------------------------------===//
76 // Load / store multiple Instructions.
79 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
80 def VLDMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
81 variable_ops), IndexModeNone, IIC_fpLoad_m,
82 "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
86 def VLDMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
87 variable_ops), IndexModeNone, IIC_fpLoad_m,
88 "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
92 def VLDMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
93 reglist:$dsts, variable_ops),
94 IndexModeUpd, IIC_fpLoad_mu,
95 "vldm${addr:submode}${p}\t$addr!, $dsts",
96 "$addr.addr = $wb", []> {
100 def VLDMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
101 reglist:$dsts, variable_ops),
102 IndexModeUpd, IIC_fpLoad_mu,
103 "vldm${addr:submode}${p}\t$addr!, $dsts",
104 "$addr.addr = $wb", []> {
107 } // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
109 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
110 def VSTMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
111 variable_ops), IndexModeNone, IIC_fpStore_m,
112 "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
116 def VSTMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
117 variable_ops), IndexModeNone, IIC_fpStore_m,
118 "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
122 def VSTMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
123 reglist:$srcs, variable_ops),
124 IndexModeUpd, IIC_fpStore_mu,
125 "vstm${addr:submode}${p}\t$addr!, $srcs",
126 "$addr.addr = $wb", []> {
130 def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
131 reglist:$srcs, variable_ops),
132 IndexModeUpd, IIC_fpStore_mu,
133 "vstm${addr:submode}${p}\t$addr!, $srcs",
134 "$addr.addr = $wb", []> {
137 } // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
139 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
142 // FIXME: Can these be placed into the base class?
143 class ADbI_Encode<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
144 dag iops, InstrItinClass itin, string opc, string asm,
146 : ADbI<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
147 // Instruction operands.
152 // Encode instruction operands.
153 let Inst{3-0} = Dm{3-0};
155 let Inst{19-16} = Dn{3-0};
157 let Inst{15-12} = Dd{3-0};
158 let Inst{22} = Dd{4};
161 class ASbI_Encode<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
162 dag iops, InstrItinClass itin, string opc, string asm,
164 : ASbI<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
165 // Instruction operands.
170 // Encode instruction operands.
171 let Inst{3-0} = Sm{4-1};
173 let Inst{19-16} = Sn{4-1};
175 let Inst{15-12} = Sd{4-1};
176 let Inst{22} = Sd{0};
179 class ASbIn_Encode<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
180 dag iops, InstrItinClass itin, string opc, string asm,
182 : ASbIn<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
183 // Instruction operands.
188 // Encode instruction operands.
189 let Inst{3-0} = Sm{4-1};
191 let Inst{19-16} = Sn{4-1};
193 let Inst{15-12} = Sd{4-1};
194 let Inst{22} = Sd{0};
198 //===----------------------------------------------------------------------===//
199 // FP Binary Operations.
// VADD: Dd = Dn + Dm / Sd = Sn + Sm, selected from the generic fadd node.
202 def VADDD : ADbI_Encode<0b11100, 0b11, 0, 0,
203 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
204 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
205 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
207 def VADDS : ASbIn_Encode<0b11100, 0b11, 0, 0,
208 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
209 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
210 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>;
// VSUB: Dd = Dn - Dm / Sd = Sn - Sm, selected from the generic fsub node.
212 def VSUBD : ADbI_Encode<0b11100, 0b11, 1, 0,
213 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
214 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
215 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
217 def VSUBS : ASbIn_Encode<0b11100, 0b11, 1, 0,
218 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
219 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
220 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>;
// VDIV: Dd = Dn / Dm and Sd = Sn / Sm, selected from the generic fdiv node.
222 def VDIVD : ADbI_Encode<0b11101, 0b00, 0, 0,
223 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
224 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
225 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;
227 def VDIVS : ASbI_Encode<0b11101, 0b00, 0, 0,
228 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
229 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
230 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
// VMUL: Dd = Dn * Dm and Sd = Sn * Sm, selected from the generic fmul node.
232 def VMULD : ADbI_Encode<0b11100, 0b10, 0, 0,
233 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
234 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
235 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;
237 def VMULS : ASbIn_Encode<0b11100, 0b10, 0, 0,
238 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
239 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
240 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>;
// VNMUL: negated multiply, dst = -(a * b).  Matched from fneg-of-fmul.
242 def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
243 IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
244 [(set DPR:$dst, (fneg (fmul DPR:$a, (f64 DPR:$b))))]>;
246 def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
247 IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
248 [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
// Match reassociated forms only if not sign dependent rounding.
// (-a) * b == -(a * b) only when sign-dependent rounding may be ignored,
// hence the NoHonorSignDependentRounding predicate on these patterns.
250 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
251 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
252 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
253 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
257 // These are encoded as unary instructions.
258 let Defs = [FPSCR] in {
259 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs),(ins DPR:$Dd, DPR:$Dm),
260 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
261 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]> {
262 // Instruction operands.
266 // Encode instruction operands.
267 let Inst{3-0} = Dm{3-0};
269 let Inst{15-12} = Dd{3-0};
270 let Inst{22} = Dd{4};
273 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs),(ins SPR:$Sd, SPR:$Sm),
274 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
275 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
276 // Instruction operands.
280 // Encode instruction operands.
281 let Inst{3-0} = Sm{4-1};
283 let Inst{15-12} = Sd{4-1};
284 let Inst{22} = Sd{0};
// VCMP (without the 'e' exception-raising suffix): no selection pattern;
// present so the disassembler can decode the instruction.
287 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$a, DPR:$b),
288 IIC_fpCMP64, "vcmp", ".f64\t$a, $b",
289 [/* For disassembly only; pattern left blank */]>;
291 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$a, SPR:$b),
292 IIC_fpCMP32, "vcmp", ".f32\t$a, $b",
293 [/* For disassembly only; pattern left blank */]>;
296 //===----------------------------------------------------------------------===//
297 // FP Unary Operations.
// VABS: dst = |a|, selected from the generic fabs node.
300 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
301 IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
302 [(set DPR:$dst, (fabs (f64 DPR:$a)))]>;
304 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
305 IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
306 [(set SPR:$dst, (fabs SPR:$a))]>;
308 let Defs = [FPSCR] in {
// Compare-against-zero variants.  The 'e' forms are selected from the
// arm_cmpfp0 node; the plain forms have no pattern and exist for the
// disassembler.  (Enclosing 'let Defs = [FPSCR]' marks the flag update.)
309 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
310 IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
311 [(arm_cmpfp0 (f64 DPR:$a))]>;
313 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$a),
314 IIC_fpCMP64, "vcmp", ".f64\t$a, #0",
315 [/* For disassembly only; pattern left blank */]>;
317 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
318 IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
319 [(arm_cmpfp0 SPR:$a)]>;
321 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$a),
322 IIC_fpCMP32, "vcmp", ".f32\t$a, #0",
323 [/* For disassembly only; pattern left blank */]>;
// VCVT f32 -> f64 widening conversion, selected from the fextend node.
326 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
327 IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
328 [(set DPR:$dst, (fextend SPR:$a))]>;
330 // Special case encoding: bits 11-8 is 0b1011.
331 def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
332 IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
333 [(set SPR:$dst, (fround DPR:$a))]> {
334 let Inst{27-23} = 0b11101;
335 let Inst{21-16} = 0b110111;
336 let Inst{11-8} = 0b1011;
337 let Inst{7-6} = 0b11;
// Between half-precision and single-precision. For disassembly only.
// VCVTB operates on the bottom half of the S register, VCVTT on the top
// half.  The instructions themselves carry no patterns; the ARMPats below
// select the bottom-half forms from the f16<->f32 conversion nodes,
// shuttling the i32-typed half value through a GPR<->SPR register-class copy.
343 def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
344 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
345 [/* For disassembly only; pattern left blank */]>;
347 def : ARMPat<(f32_to_f16 SPR:$a),
348 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
350 def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
351 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
352 [/* For disassembly only; pattern left blank */]>;
354 def : ARMPat<(f16_to_f32 GPR:$a),
355 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
357 def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
358 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
359 [/* For disassembly only; pattern left blank */]>;
361 def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
362 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
363 [/* For disassembly only; pattern left blank */]>;
// VMOV register-to-register FP moves.  No selection pattern ([]); marked
// neverHasSideEffects so they can be freely moved or deleted.
365 let neverHasSideEffects = 1 in {
366 def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
367 IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
369 def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
370 IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
371 } // neverHasSideEffects
// VNEG: dst = -a, selected from the generic fneg node.
373 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
374 IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
375 [(set DPR:$dst, (fneg (f64 DPR:$a)))]>;
377 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
378 IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
379 [(set SPR:$dst, (fneg SPR:$a))]>;
// VSQRT: dst = sqrt(a), selected from the generic fsqrt node.
381 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
382 IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
383 [(set DPR:$dst, (fsqrt (f64 DPR:$a)))]>;
385 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
386 IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
387 [(set SPR:$dst, (fsqrt SPR:$a))]>;
389 //===----------------------------------------------------------------------===//
390 // FP <-> GPR Copies. Int <-> FP Conversions.
// Bit-pattern-preserving moves between a GPR and an S register, selected
// from i32<->f32 bitconvert nodes.
393 def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
394 IIC_fpMOVSI, "vmov", "\t$dst, $src",
395 [(set GPR:$dst, (bitconvert SPR:$src))]>;
397 def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
398 IIC_fpMOVIS, "vmov", "\t$dst, $src",
399 [(set SPR:$dst, (bitconvert GPR:$src))]>;
401 let neverHasSideEffects = 1 in {
402 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
403 (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
404 IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src",
405 [/* FIXME: Can't write pattern for multiple result instr*/]> {
406 let Inst{7-6} = 0b00;
409 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
410 (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
411 IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
412 [/* For disassembly only; pattern left blank */]> {
413 let Inst{7-6} = 0b00;
415 } // neverHasSideEffects
420 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
421 (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
422 IIC_fpMOVID, "vmov", "\t$dst, $src1, $src2",
423 [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
424 let Inst{7-6} = 0b00;
427 let neverHasSideEffects = 1 in
428 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
429 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
430 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
431 [/* For disassembly only; pattern left blank */]> {
432 let Inst{7-6} = 0b00;
438 // FMRX : SPR system reg -> GPR
442 // FMXR: GPR -> VFP system reg
447 def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
448 (outs DPR:$dst), (ins SPR:$a),
449 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
450 [(set DPR:$dst, (f64 (arm_sitof SPR:$a)))]> {
451 let Inst{7} = 1; // s32
454 def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
455 (outs SPR:$dst),(ins SPR:$a),
456 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
457 [(set SPR:$dst, (arm_sitof SPR:$a))]> {
458 let Inst{7} = 1; // s32
461 def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
462 (outs DPR:$dst), (ins SPR:$a),
463 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
464 [(set DPR:$dst, (f64 (arm_uitof SPR:$a)))]> {
465 let Inst{7} = 0; // u32
468 def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
469 (outs SPR:$dst), (ins SPR:$a),
470 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
471 [(set SPR:$dst, (arm_uitof SPR:$a))]> {
472 let Inst{7} = 0; // u32
476 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
478 def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
479 (outs SPR:$dst), (ins DPR:$a),
480 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
481 [(set SPR:$dst, (arm_ftosi (f64 DPR:$a)))]> {
482 let Inst{7} = 1; // Z bit
485 def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
486 (outs SPR:$dst), (ins SPR:$a),
487 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
488 [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
489 let Inst{7} = 1; // Z bit
492 def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
493 (outs SPR:$dst), (ins DPR:$a),
494 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
495 [(set SPR:$dst, (arm_ftoui (f64 DPR:$a)))]> {
496 let Inst{7} = 1; // Z bit
499 def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
500 (outs SPR:$dst), (ins SPR:$a),
501 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
502 [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
503 let Inst{7} = 1; // Z bit
506 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
507 // For disassembly only.
508 let Uses = [FPSCR] in {
509 def VTOSIRD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
510 (outs SPR:$dst), (ins DPR:$a),
511 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$dst, $a",
512 [(set SPR:$dst, (int_arm_vcvtr (f64 DPR:$a)))]> {
513 let Inst{7} = 0; // Z bit
516 def VTOSIRS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
517 (outs SPR:$dst), (ins SPR:$a),
518 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$dst, $a",
519 [(set SPR:$dst, (int_arm_vcvtr SPR:$a))]> {
520 let Inst{7} = 0; // Z bit
523 def VTOUIRD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
524 (outs SPR:$dst), (ins DPR:$a),
525 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$dst, $a",
526 [(set SPR:$dst, (int_arm_vcvtru (f64 DPR:$a)))]> {
527 let Inst{7} = 0; // Z bit
530 def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
531 (outs SPR:$dst), (ins SPR:$a),
532 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$dst, $a",
533 [(set SPR:$dst, (int_arm_vcvtru SPR:$a))]> {
534 let Inst{7} = 0; // Z bit
538 // Convert between floating-point and fixed-point
539 // Data type for fixed-point naming convention:
540 // S16 (U=0, sx=0) -> SH
541 // U16 (U=1, sx=0) -> UH
542 // S32 (U=0, sx=1) -> SL
543 // U32 (U=1, sx=1) -> UL
545 let Constraints = "$a = $dst" in {
547 // FP to Fixed-Point:
549 let isCodeGenOnly = 1 in {
// FP -> fixed-point conversions.  The last encoding bit selects 16-bit (0)
// vs. 32-bit (1) fixed-point width; $fbits gives the number of fraction
// bits.  No patterns — for disassembly only.  The enclosing
// 'let Constraints = "$a = $dst"' ties source and destination registers.
550 def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
551 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
552 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
553 [/* For disassembly only; pattern left blank */]>;
555 def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
556 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
557 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
558 [/* For disassembly only; pattern left blank */]>;
560 def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
561 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
562 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
563 [/* For disassembly only; pattern left blank */]>;
565 def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
566 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
567 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
568 [/* For disassembly only; pattern left blank */]>;
// Double-precision versions of the same four conversions.
570 def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
571 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
572 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
573 [/* For disassembly only; pattern left blank */]>;
575 def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
576 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
577 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
578 [/* For disassembly only; pattern left blank */]>;
580 def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
581 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
582 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
583 [/* For disassembly only; pattern left blank */]>;
585 def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
586 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
587 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
588 [/* For disassembly only; pattern left blank */]>;
591 // Fixed-Point to FP:
593 let isCodeGenOnly = 1 in {
// Fixed-point -> FP conversions, mirroring the FP->fixed-point group above:
// last encoding bit selects 16-bit (0) vs. 32-bit (1) width, $fbits is the
// fraction-bit count.  No patterns — for disassembly only.
594 def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
595 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
596 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
597 [/* For disassembly only; pattern left blank */]>;
599 def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
600 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
601 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
602 [/* For disassembly only; pattern left blank */]>;
604 def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
605 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
606 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
607 [/* For disassembly only; pattern left blank */]>;
609 def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
610 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
611 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
612 [/* For disassembly only; pattern left blank */]>;
// Double-precision versions of the same four conversions.
614 def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
615 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
616 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
617 [/* For disassembly only; pattern left blank */]>;
619 def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
620 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
621 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
622 [/* For disassembly only; pattern left blank */]>;
624 def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
625 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
626 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
627 [/* For disassembly only; pattern left blank */]>;
629 def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
630 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
631 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
632 [/* For disassembly only; pattern left blank */]>;
635 } // End of 'let Constraints = "$a = $dst" in'
637 //===----------------------------------------------------------------------===//
638 // FP FMA Operations.
// VMLA: multiply-accumulate, dst = dstin + a * b.  The RegConstraint ties
// the accumulator input to the destination register.
641 def VMLAD : ADbI_vmlX<0b11100, 0b00, 0, 0,
642 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
643 IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
644 [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b),
645 (f64 DPR:$dstin)))]>,
646 RegConstraint<"$dstin = $dst">;
648 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
649 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
650 IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
651 [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
652 RegConstraint<"$dstin = $dst">;
// VNMLS: dst = a * b - dstin (per the fsub pattern below).
654 def VNMLSD : ADbI_vmlX<0b11100, 0b01, 0, 0,
655 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
656 IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
657 [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b),
658 (f64 DPR:$dstin)))]>,
659 RegConstraint<"$dstin = $dst">;
661 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
662 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
663 IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
664 [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
665 RegConstraint<"$dstin = $dst">;
// VMLS: multiply-subtract, dst = dstin - a * b, expressed in the pattern
// as dstin + (-(a * b)).
667 def VMLSD : ADbI_vmlX<0b11100, 0b00, 1, 0,
668 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
669 IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
670 [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)),
671 (f64 DPR:$dstin)))]>,
672 RegConstraint<"$dstin = $dst">;
674 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
675 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
676 IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
677 [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
678 RegConstraint<"$dstin = $dst">;
// Also select VMLS from the equivalent fsub form, but only when NEON is
// not being used for scalar FP.
680 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
681 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
682 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
683 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
// VNMLA: dst = -(a * b) - dstin.
685 def VNMLAD : ADbI_vmlX<0b11100, 0b01, 1, 0,
686 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
687 IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
688 [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)),
689 (f64 DPR:$dstin)))]>,
690 RegConstraint<"$dstin = $dst">;
692 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
693 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
694 IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
695 [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
696 RegConstraint<"$dstin = $dst">;
698 //===----------------------------------------------------------------------===//
699 // FP Conditional moves.
// FP conditional moves/negates.  Selection patterns are commented out;
// the RegConstraint ties $false to $dst so the false value is already in
// the destination register and the instruction executes only when its
// predicate holds.
702 let neverHasSideEffects = 1 in {
703 def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
704 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
705 IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
706 [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
707 RegConstraint<"$false = $dst">;
709 def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
710 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
711 IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
712 [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
713 RegConstraint<"$false = $dst">;
715 def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
716 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
717 IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
718 [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
719 RegConstraint<"$false = $dst">;
721 def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
722 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
723 IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
724 [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
725 RegConstraint<"$false = $dst">;
726 } // neverHasSideEffects
728 //===----------------------------------------------------------------------===//
732 // APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags
734 let Defs = [CPSR], Uses = [FPSCR] in
735 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
736 "\tapsr_nzcv, fpscr",
738 let Inst{27-20} = 0b11101111;
739 let Inst{19-16} = 0b0001;
740 let Inst{15-12} = 0b1111;
741 let Inst{11-8} = 0b1010;
746 // FPSCR <-> GPR (for disassembly only)
747 let hasSideEffects = 1, Uses = [FPSCR] in
748 def VMRS : VFPAI<(outs GPR:$dst), (ins), VFPMiscFrm, IIC_fpSTAT,
749 "vmrs", "\t$dst, fpscr",
750 [(set GPR:$dst, (int_arm_get_fpscr))]> {
751 let Inst{27-20} = 0b11101111;
752 let Inst{19-16} = 0b0001;
753 let Inst{11-8} = 0b1010;
758 let Defs = [FPSCR] in
759 def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT,
760 "vmsr", "\tfpscr, $src",
761 [(int_arm_set_fpscr GPR:$src)]> {
762 let Inst{27-20} = 0b11101110;
763 let Inst{19-16} = 0b0001;
764 let Inst{11-8} = 0b1010;
769 // Materialize FP immediates. VFP3 only.
770 let isReMaterializable = 1 in {
771 def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
772 VFPMiscFrm, IIC_fpUNA64,
773 "vmov", ".f64\t$dst, $imm",
774 [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
775 let Inst{27-23} = 0b11101;
776 let Inst{21-20} = 0b11;
777 let Inst{11-9} = 0b101;
779 let Inst{7-4} = 0b0000;
782 def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
783 VFPMiscFrm, IIC_fpUNA32,
784 "vmov", ".f32\t$dst, $imm",
785 [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
786 let Inst{27-23} = 0b11101;
787 let Inst{21-20} = 0b11;
788 let Inst{11-9} = 0b101;
790 let Inst{7-4} = 0b0000;