1 //===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
15 SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
17 SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
19 SDTypeProfile<0, 1, [SDTCisFP<0>]>;
21 SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
// ARM-specific SelectionDAG nodes used by the VFP patterns in this file.
// FP -> integer conversions; selected into the "Z bit" (round-toward-zero)
// VCVT variants below (see VTOSIZD/VTOUIZD etc.).
def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
// Integer bits held in an SPR -> FP conversions.
def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// Copies the FP status flags to the ARM CPSR; consumes the flag produced
// by the compare nodes below (SDNPInFlag) and produces the CPSR flag.
def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
// FP compares: register-register, and register against +0.0.
def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
// Builds an f64 from two i32 GPRs (see VMOVDRR).
def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
33 //===----------------------------------------------------------------------===//
34 // Operand Definitions.
38 def vfp_f32imm : Operand<f32>,
39 PatLeaf<(f32 fpimm), [{
40 return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
42 let PrintMethod = "printVFPf32ImmOperand";
45 def vfp_f64imm : Operand<f64>,
46 PatLeaf<(f64 fpimm), [{
47 return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
49 let PrintMethod = "printVFPf64ImmOperand";
53 //===----------------------------------------------------------------------===//
54 // Load / store Instructions.
57 let canFoldAsLoad = 1, isReMaterializable = 1 in {
// VLDR.64: load a double-precision value through an addrmode5
// (base register + scaled 8-bit offset) address.
58 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
59 IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
60 [(set DPR:$dst, (f64 (load addrmode5:$addr)))]>;
// VLDR.32: single-precision load, same addressing mode.
62 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
63 IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
64 [(set SPR:$dst, (load addrmode5:$addr))]>;
// VSTR.64: double-precision store.
67 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
68 IIC_fpStore64, "vstr", ".64\t$src, $addr",
69 [(store (f64 DPR:$src), addrmode5:$addr)]>;
// VSTR.32: single-precision store.
71 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
72 IIC_fpStore32, "vstr", ".32\t$src, $addr",
73 [(store SPR:$src, addrmode5:$addr)]>;
75 //===----------------------------------------------------------------------===//
76 // Load / store multiple Instructions.
79 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
80 def VLDMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
81 variable_ops), IndexModeNone, IIC_fpLoad_m,
82 "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
86 def VLDMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
87 variable_ops), IndexModeNone, IIC_fpLoad_m,
88 "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
92 def VLDMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
93 reglist:$dsts, variable_ops),
94 IndexModeUpd, IIC_fpLoad_mu,
95 "vldm${addr:submode}${p}\t$addr!, $dsts",
96 "$addr.addr = $wb", []> {
100 def VLDMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
101 reglist:$dsts, variable_ops),
102 IndexModeUpd, IIC_fpLoad_mu,
103 "vldm${addr:submode}${p}\t$addr!, $dsts",
104 "$addr.addr = $wb", []> {
107 } // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
109 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
110 def VSTMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
111 variable_ops), IndexModeNone, IIC_fpStore_m,
112 "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
116 def VSTMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
117 variable_ops), IndexModeNone, IIC_fpStore_m,
118 "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
122 def VSTMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
123 reglist:$srcs, variable_ops),
124 IndexModeUpd, IIC_fpStore_mu,
125 "vstm${addr:submode}${p}\t$addr!, $srcs",
126 "$addr.addr = $wb", []> {
130 def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
131 reglist:$srcs, variable_ops),
132 IndexModeUpd, IIC_fpStore_mu,
133 "vstm${addr:submode}${p}\t$addr!, $srcs",
134 "$addr.addr = $wb", []> {
137 } // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
139 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
142 // FIXME: Can these be placed into the base class?
143 class ADbI_Encode<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
144 dag iops, InstrItinClass itin, string opc, string asm,
146 : ADbI<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
147 // Instruction operands.
152 // Encode instruction operands.
153 let Inst{3-0} = Dm{3-0};
155 let Inst{19-16} = Dn{3-0};
157 let Inst{15-12} = Dd{3-0};
158 let Inst{22} = Dd{4};
161 class ASbIn_Encode<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
162 dag iops, InstrItinClass itin, string opc, string asm,
164 : ASbIn<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
165 // Instruction operands.
170 // Encode instruction operands.
171 let Inst{3-0} = Sm{4-1};
173 let Inst{19-16} = Sn{4-1};
175 let Inst{15-12} = Sd{4-1};
176 let Inst{22} = Sd{0};
180 //===----------------------------------------------------------------------===//
181 // FP Binary Operations.
// VADD.F64: Dd = Dn + Dm.
184 def VADDD : ADbI_Encode<0b11100, 0b11, 0, 0,
185 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
186 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
187 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
// VADD.F32: Sd = Sn + Sm.
189 def VADDS : ASbIn_Encode<0b11100, 0b11, 0, 0,
190 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
191 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
192 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>;
// VSUB.F64: Dd = Dn - Dm (op6 bit = 1 distinguishes it from vadd).
194 def VSUBD : ADbI_Encode<0b11100, 0b11, 1, 0,
195 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
196 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
197 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
// VSUB.F32: Sd = Sn - Sm.
199 def VSUBS : ASbIn_Encode<0b11100, 0b11, 1, 0,
200 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
201 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
202 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>;
// VDIV.F64: dst = a / b.
204 def VDIVD : ADbI<0b11101, 0b00, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
205 IIC_fpDIV64, "vdiv", ".f64\t$dst, $a, $b",
206 [(set DPR:$dst, (fdiv DPR:$a, (f64 DPR:$b)))]>;
// VDIV.F32: dst = a / b.
208 def VDIVS : ASbI<0b11101, 0b00, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
209 IIC_fpDIV32, "vdiv", ".f32\t$dst, $a, $b",
210 [(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
// VMUL.F64: dst = a * b.
212 def VMULD : ADbI<0b11100, 0b10, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
213 IIC_fpMUL64, "vmul", ".f64\t$dst, $a, $b",
214 [(set DPR:$dst, (fmul DPR:$a, (f64 DPR:$b)))]>;
// VMUL.F32: dst = a * b.
216 def VMULS : ASbIn<0b11100, 0b10, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
217 IIC_fpMUL32, "vmul", ".f32\t$dst, $a, $b",
218 [(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
// VNMUL.F64: dst = -(a * b).
220 def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
221 IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
222 [(set DPR:$dst, (fneg (fmul DPR:$a, (f64 DPR:$b))))]>;
// VNMUL.F32: dst = -(a * b).
224 def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
225 IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
226 [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
228 // Match the reassociated form (-a)*b as vnmul (-(a*b)) as well. The two
228 // differ only in where the negation is applied relative to rounding, so
228 // this fold is legal only when sign-dependent rounding is not honored.
229 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
230 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
231 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
232 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
235 // These are encoded as unary instructions.
236 let Defs = [FPSCR] in {
237 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs),(ins DPR:$Dd, DPR:$Dm),
238 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
239 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]> {
240 // Instruction operands.
244 // Encode instruction operands.
245 let Inst{3-0} = Dm{3-0};
247 let Inst{15-12} = Dd{3-0};
248 let Inst{22} = Dd{4};
251 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs),(ins SPR:$Sd, SPR:$Sm),
252 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
253 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
254 // Instruction operands.
258 // Encode instruction operands.
259 let Inst{3-0} = Sm{4-1};
261 let Inst{15-12} = Sd{4-1};
262 let Inst{22} = Sd{0};
// VCMP.F64 / VCMP.F32: register-register compare, no codegen pattern
// (codegen uses the vcmpe forms via arm_cmpfp). NOTE(review): "vcmp" vs
// "vcmpe" presumably differ in NaN exception behavior per the ARM ARM —
// confirm; only the mnemonic/op bits differ here.
265 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$a, DPR:$b),
266 IIC_fpCMP64, "vcmp", ".f64\t$a, $b",
267 [/* For disassembly only; pattern left blank */]>;
269 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$a, SPR:$b),
270 IIC_fpCMP32, "vcmp", ".f32\t$a, $b",
271 [/* For disassembly only; pattern left blank */]>;
274 //===----------------------------------------------------------------------===//
275 // FP Unary Operations.
// VABS.F64: dst = |a|.
278 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
279 IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
280 [(set DPR:$dst, (fabs (f64 DPR:$a)))]>;
// VABS.F32: dst = |a|.
282 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
283 IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
284 [(set SPR:$dst, (fabs SPR:$a))]>;
286 let Defs = [FPSCR] in {
// Compare against +0.0; selected from the arm_cmpfp0 node.
287 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
288 IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
289 [(arm_cmpfp0 (f64 DPR:$a))]>;
// Non-"E" variant; no codegen pattern, kept for the disassembler.
291 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$a),
292 IIC_fpCMP64, "vcmp", ".f64\t$a, #0",
293 [/* For disassembly only; pattern left blank */]>;
// Single-precision compare against +0.0.
295 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
296 IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
297 [(arm_cmpfp0 SPR:$a)]>;
299 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$a),
300 IIC_fpCMP32, "vcmp", ".f32\t$a, #0",
301 [/* For disassembly only; pattern left blank */]>;
// VCVT.F64.F32: widen single to double (fextend). Note the naming/asm
// convention: destination type appears first in both the def name suffix
// ("DS" = D dest, S source) and the ".f64.f32" asm suffix.
304 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
305 IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
306 [(set DPR:$dst, (fextend SPR:$a))]>;
308 // Special case encoding: bits 11-8 is 0b1011.
309 def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
310 IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
311 [(set SPR:$dst, (fround DPR:$a))]> {
312 let Inst{27-23} = 0b11101;
313 let Inst{21-16} = 0b110111;
314 let Inst{11-8} = 0b1011;
315 let Inst{7-6} = 0b11;
319 // Between half-precision and single-precision. For disassembly only.
// VCVTB.F32.F16 (opc2=0b0010): half -> single. Name follows the VCVTDS
// convention above: "SH" = S(single) destination, H(half) source, matching
// the ".f32.f16" dest-first asm suffix.
321 def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
322 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
323 [/* For disassembly only; pattern left blank */]>;
// VCVTB.F16.F32 (opc2=0b0011): single -> half.
328 def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
329 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
330 [/* For disassembly only; pattern left blank */]>;
// f32_to_f16 takes an f32 in an SPR and produces the half-precision bits in
// a GPR, i.e. a single->half conversion: that is VCVTBHS (".f16.f32"), not
// VCVTBSH as previously written (which, per its asm suffix and opc2=0b0010
// encoding, converts half->single). The two patterns below had the
// instructions swapped; they now match each node's direction. Both patterns
// are placed after the instruction defs they reference.
325 def : ARMPat<(f32_to_f16 SPR:$a),
326 (i32 (COPY_TO_REGCLASS (VCVTBHS SPR:$a), GPR))>;
// f16_to_f32 widens half bits held in a GPR to an f32: half->single,
// i.e. VCVTBSH.
332 def : ARMPat<(f16_to_f32 GPR:$a),
333 (VCVTBSH (COPY_TO_REGCLASS GPR:$a, SPR))>;
// "Top" (T) variants of the same conversions; no codegen patterns provided.
335 def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
336 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
337 [/* For disassembly only; pattern left blank */]>;
339 def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
340 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
341 [/* For disassembly only; pattern left blank */]>;
// Register-to-register FP moves. No patterns: copies are emitted by the
// register allocator / copy lowering, not by instruction selection.
343 let neverHasSideEffects = 1 in {
344 def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
345 IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
347 def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
348 IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
349 } // neverHasSideEffects
// VNEG.F64 / VNEG.F32: dst = -a.
351 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
352 IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
353 [(set DPR:$dst, (fneg (f64 DPR:$a)))]>;
355 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
356 IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
357 [(set SPR:$dst, (fneg SPR:$a))]>;
// VSQRT.F64 / VSQRT.F32: dst = sqrt(a).
359 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
360 IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
361 [(set DPR:$dst, (fsqrt (f64 DPR:$a)))]>;
363 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
364 IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
365 [(set SPR:$dst, (fsqrt SPR:$a))]>;
367 //===----------------------------------------------------------------------===//
368 // FP <-> GPR Copies. Int <-> FP Conversions.
// Bit-exact moves between an SPR and a GPR (i32 <-> f32 bitcast);
// selected from the bitconvert node, no value conversion is performed.
371 def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
372 IIC_fpMOVSI, "vmov", "\t$dst, $src",
373 [(set GPR:$dst, (bitconvert SPR:$src))]>;
375 def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
376 IIC_fpMOVIS, "vmov", "\t$dst, $src",
377 [(set SPR:$dst, (bitconvert GPR:$src))]>;
379 let neverHasSideEffects = 1 in {
380 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
381 (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
382 IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src",
383 [/* FIXME: Can't write pattern for multiple result instr*/]> {
384 let Inst{7-6} = 0b00;
387 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
388 (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
389 IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
390 [/* For disassembly only; pattern left blank */]> {
391 let Inst{7-6} = 0b00;
393 } // neverHasSideEffects
398 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
399 (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
400 IIC_fpMOVID, "vmov", "\t$dst, $src1, $src2",
401 [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
402 let Inst{7-6} = 0b00;
405 let neverHasSideEffects = 1 in
406 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
407 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
408 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
409 [/* For disassembly only; pattern left blank */]> {
410 let Inst{7-6} = 0b00;
416 // FMRX : SPR system reg -> GPR
420 // FMXR: GPR -> VFP system reg
425 def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
426 (outs DPR:$dst), (ins SPR:$a),
427 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
428 [(set DPR:$dst, (f64 (arm_sitof SPR:$a)))]> {
429 let Inst{7} = 1; // s32
432 def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
433 (outs SPR:$dst),(ins SPR:$a),
434 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
435 [(set SPR:$dst, (arm_sitof SPR:$a))]> {
436 let Inst{7} = 1; // s32
439 def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
440 (outs DPR:$dst), (ins SPR:$a),
441 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
442 [(set DPR:$dst, (f64 (arm_uitof SPR:$a)))]> {
443 let Inst{7} = 0; // u32
446 def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
447 (outs SPR:$dst), (ins SPR:$a),
448 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
449 [(set SPR:$dst, (arm_uitof SPR:$a))]> {
450 let Inst{7} = 0; // u32
454 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
456 def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
457 (outs SPR:$dst), (ins DPR:$a),
458 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
459 [(set SPR:$dst, (arm_ftosi (f64 DPR:$a)))]> {
460 let Inst{7} = 1; // Z bit
463 def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
464 (outs SPR:$dst), (ins SPR:$a),
465 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
466 [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
467 let Inst{7} = 1; // Z bit
470 def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
471 (outs SPR:$dst), (ins DPR:$a),
472 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
473 [(set SPR:$dst, (arm_ftoui (f64 DPR:$a)))]> {
474 let Inst{7} = 1; // Z bit
477 def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
478 (outs SPR:$dst), (ins SPR:$a),
479 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
480 [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
481 let Inst{7} = 1; // Z bit
484 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
485 // For disassembly only.
486 let Uses = [FPSCR] in {
487 def VTOSIRD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
488 (outs SPR:$dst), (ins DPR:$a),
489 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$dst, $a",
490 [(set SPR:$dst, (int_arm_vcvtr (f64 DPR:$a)))]> {
491 let Inst{7} = 0; // Z bit
494 def VTOSIRS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
495 (outs SPR:$dst), (ins SPR:$a),
496 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$dst, $a",
497 [(set SPR:$dst, (int_arm_vcvtr SPR:$a))]> {
498 let Inst{7} = 0; // Z bit
501 def VTOUIRD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
502 (outs SPR:$dst), (ins DPR:$a),
503 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$dst, $a",
504 [(set SPR:$dst, (int_arm_vcvtru (f64 DPR:$a)))]> {
505 let Inst{7} = 0; // Z bit
508 def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
509 (outs SPR:$dst), (ins SPR:$a),
510 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$dst, $a",
511 [(set SPR:$dst, (int_arm_vcvtru SPR:$a))]> {
512 let Inst{7} = 0; // Z bit
516 // Convert between floating-point and fixed-point
517 // Data type for fixed-point naming convention:
518 // S16 (U=0, sx=0) -> SH
519 // U16 (U=1, sx=0) -> UH
520 // S32 (U=0, sx=1) -> SL
521 // U32 (U=1, sx=1) -> UL
523 let Constraints = "$a = $dst" in {
525 // FP to Fixed-Point:
527 let isCodeGenOnly = 1 in {
// FP -> fixed-point conversions. $fbits is the number of fraction bits; the
// result overwrites the source register (Constraints "$a = $dst" from the
// enclosing let). Naming per the table above: S16->SH, U16->UH, S32->SL,
// U32->UL. No selection patterns; blank-pattern/isCodeGenOnly defs.
// Single-precision source:
528 def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
529 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
530 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
531 [/* For disassembly only; pattern left blank */]>;
533 def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
534 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
535 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
536 [/* For disassembly only; pattern left blank */]>;
538 def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
539 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
540 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
541 [/* For disassembly only; pattern left blank */]>;
543 def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
544 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
545 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
546 [/* For disassembly only; pattern left blank */]>;
// Double-precision source:
548 def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
549 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
550 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
551 [/* For disassembly only; pattern left blank */]>;
553 def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
554 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
555 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
556 [/* For disassembly only; pattern left blank */]>;
558 def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
559 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
560 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
561 [/* For disassembly only; pattern left blank */]>;
563 def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
564 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
565 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
566 [/* For disassembly only; pattern left blank */]>;
569 // Fixed-Point to FP:
571 let isCodeGenOnly = 1 in {
// Fixed-point -> FP conversions; mirror image of the group above, with the
// same $fbits operand and tied $a = $dst source/destination register.
// To single-precision:
572 def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
573 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
574 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
575 [/* For disassembly only; pattern left blank */]>;
577 def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
578 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
579 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
580 [/* For disassembly only; pattern left blank */]>;
582 def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
583 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
584 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
585 [/* For disassembly only; pattern left blank */]>;
587 def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
588 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
589 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
590 [/* For disassembly only; pattern left blank */]>;
// To double-precision:
592 def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
593 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
594 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
595 [/* For disassembly only; pattern left blank */]>;
597 def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
598 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
599 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
600 [/* For disassembly only; pattern left blank */]>;
602 def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
603 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
604 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
605 [/* For disassembly only; pattern left blank */]>;
607 def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
608 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
609 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
610 [/* For disassembly only; pattern left blank */]>;
613 } // End of 'let Constraints = "$a = $dst" in'
615 //===----------------------------------------------------------------------===//
616 // FP FMA Operations.
// Multiply-accumulate family. All tie the accumulator input to the
// destination register (RegConstraint "$dstin = $dst").
// VMLA: dst = dstin + a*b.
619 def VMLAD : ADbI_vmlX<0b11100, 0b00, 0, 0,
620 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
621 IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
622 [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b),
623 (f64 DPR:$dstin)))]>,
624 RegConstraint<"$dstin = $dst">;
626 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
627 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
628 IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
629 [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
630 RegConstraint<"$dstin = $dst">;
// VNMLS: dst = a*b - dstin.
632 def VNMLSD : ADbI_vmlX<0b11100, 0b01, 0, 0,
633 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
634 IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
635 [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b),
636 (f64 DPR:$dstin)))]>,
637 RegConstraint<"$dstin = $dst">;
639 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
640 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
641 IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
642 [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
643 RegConstraint<"$dstin = $dst">;
// VMLS: dst = dstin - a*b (written as dstin + (-(a*b)) in the pattern).
645 def VMLSD : ADbI_vmlX<0b11100, 0b00, 1, 0,
646 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
647 IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
648 [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)),
649 (f64 DPR:$dstin)))]>,
650 RegConstraint<"$dstin = $dst">;
652 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
653 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
654 IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
655 [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
656 RegConstraint<"$dstin = $dst">;
// Also select the plain fsub form of dstin - a*b into vmls (only when VFP
// rather than NEON handles scalar FP).
658 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
659 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
660 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
661 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
// VNMLA: dst = -(a*b) - dstin.
663 def VNMLAD : ADbI_vmlX<0b11100, 0b01, 1, 0,
664 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
665 IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
666 [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)),
667 (f64 DPR:$dstin)))]>,
668 RegConstraint<"$dstin = $dst">;
670 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
671 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
672 IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
673 [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
674 RegConstraint<"$dstin = $dst">;
676 //===----------------------------------------------------------------------===//
677 // FP Conditional moves.
// FP conditional moves/negates. The intended ARMcmov/ARMcneg selection
// patterns are left commented out, so these are not produced by pattern
// matching; the $false operand is tied to $dst so the destination already
// holds the "false" value if the (predicated) operation does not execute.
680 let neverHasSideEffects = 1 in {
681 def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
682 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
683 IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
684 [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
685 RegConstraint<"$false = $dst">;
687 def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
688 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
689 IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
690 [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
691 RegConstraint<"$false = $dst">;
693 def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
694 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
695 IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
696 [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
697 RegConstraint<"$false = $dst">;
699 def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
700 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
701 IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
702 [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
703 RegConstraint<"$false = $dst">;
704 } // neverHasSideEffects
706 //===----------------------------------------------------------------------===//
710 // APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags
712 let Defs = [CPSR], Uses = [FPSCR] in
713 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
714 "\tapsr_nzcv, fpscr",
716 let Inst{27-20} = 0b11101111;
717 let Inst{19-16} = 0b0001;
718 let Inst{15-12} = 0b1111;
719 let Inst{11-8} = 0b1010;
724 // FPSCR <-> GPR (for disassembly only)
725 let hasSideEffects = 1, Uses = [FPSCR] in
726 def VMRS : VFPAI<(outs GPR:$dst), (ins), VFPMiscFrm, IIC_fpSTAT,
727 "vmrs", "\t$dst, fpscr",
728 [(set GPR:$dst, (int_arm_get_fpscr))]> {
729 let Inst{27-20} = 0b11101111;
730 let Inst{19-16} = 0b0001;
731 let Inst{11-8} = 0b1010;
736 let Defs = [FPSCR] in
737 def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT,
738 "vmsr", "\tfpscr, $src",
739 [(int_arm_set_fpscr GPR:$src)]> {
740 let Inst{27-20} = 0b11101110;
741 let Inst{19-16} = 0b0001;
742 let Inst{11-8} = 0b1010;
747 // Materialize FP immediates. VFP3 only.
748 let isReMaterializable = 1 in {
749 def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
750 VFPMiscFrm, IIC_fpUNA64,
751 "vmov", ".f64\t$dst, $imm",
752 [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
753 let Inst{27-23} = 0b11101;
754 let Inst{21-20} = 0b11;
755 let Inst{11-9} = 0b101;
757 let Inst{7-4} = 0b0000;
760 def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
761 VFPMiscFrm, IIC_fpUNA32,
762 "vmov", ".f32\t$dst, $imm",
763 [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
764 let Inst{27-23} = 0b11101;
765 let Inst{21-20} = 0b11;
766 let Inst{11-9} = 0b101;
768 let Inst{7-4} = 0b0000;