1 //===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
// SelectionDAG type profiles and custom ARMISD nodes used by the VFP
// instruction patterns in this file.
// NOTE(review): the "def SDT_* :" header lines for the profiles below are
// missing from this chunk — the SDTypeProfile bodies are continuations of
// those (unseen) defs, and the last profile is itself cut short.
15 SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
17 SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
19 SDTypeProfile<0, 1, [SDTCisFP<0>]>;
21 SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
// FP -> int and int -> FP conversion nodes; matched by the VCVT
// definitions later in this file.
24 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
25 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
26 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
27 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// arm_fmstat has both incoming and outgoing flag dependencies; the FP
// compare nodes only produce an outgoing flag.
28 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
29 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
30 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
// Build an f64 (D register) from a pair of i32 GPRs (matched by VMOVDRR).
31 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
33 //===----------------------------------------------------------------------===//
34 // Operand Definitions.
// Operand + PatLeaf for f32 immediates encodable as a VFP immediate:
// ARM::getVFPf32Imm returns -1 when the value has no encoding, so the
// predicate only matches representable constants (used by the VFP3-only
// FCONSTS/FCONSTD below).
// NOTE(review): the "}]> {" and closing "}" lines of both defs are missing
// from this chunk.
38 def vfp_f32imm : Operand<f32>,
39 PatLeaf<(f32 fpimm), [{
40 return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
42 let PrintMethod = "printVFPf32ImmOperand";
// Same as above, for f64 immediates.
45 def vfp_f64imm : Operand<f64>,
46 PatLeaf<(f64 fpimm), [{
47 return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
49 let PrintMethod = "printVFPf64ImmOperand";
53 //===----------------------------------------------------------------------===//
54 // Load / store Instructions.
// Single-register FP loads/stores (vldr/vstr) for D and S registers, all
// using addrmode5 addressing. The loads are marked canFoldAsLoad and
// isReMaterializable; mayHaveSideEffects = 1 presumably constrains how
// freely they may be moved — TODO confirm original intent.
// NOTE(review): the closing '}' of this 'let ... in {' region is missing
// from this chunk, so it is unclear whether the stores fall inside it.
57 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
58 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
59 IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
60 [(set DPR:$dst, (load addrmode5:$addr))]>;
62 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
63 IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
64 [(set SPR:$dst, (load addrmode5:$addr))]>;
67 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
68 IIC_fpStore64, "vstr", ".64\t$src, $addr",
69 [(store DPR:$src, addrmode5:$addr)]>;
71 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
72 IIC_fpStore32, "vstr", ".32\t$src, $addr",
73 [(store SPR:$src, addrmode5:$addr)]>;
75 //===----------------------------------------------------------------------===//
76 // Load / store multiple Instructions.
// Multiple-register FP load/store (vldm/vstm). The register list is
// supplied via variable_ops, so mayLoad/mayStore plus the
// hasExtraDefRegAllocReq / hasExtraSrcRegAllocReq flags inform the
// register allocator that operands beyond the declared ones are
// defined/used.
// NOTE(review): the pattern lines of each def are missing from this chunk.
79 let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
80 def VLDMD : AXDI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
81 variable_ops), IIC_fpLoadm,
82 "vldm${addr:submode}${p}\t${addr:base}, $wb",
87 def VLDMS : AXSI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
88 variable_ops), IIC_fpLoadm,
89 "vldm${addr:submode}${p}\t${addr:base}, $wb",
93 } // mayLoad, hasExtraDefRegAllocReq
95 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in {
96 def VSTMD : AXDI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
97 variable_ops), IIC_fpStorem,
98 "vstm${addr:submode}${p}\t${addr:base}, $wb",
103 def VSTMS : AXSI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
104 variable_ops), IIC_fpStorem,
105 "vstm${addr:submode}${p}\t${addr:base}, $wb",
109 } // mayStore, hasExtraSrcRegAllocReq
111 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
113 //===----------------------------------------------------------------------===//
114 // FP Binary Operations.
// FP binary arithmetic. Each operation has a .f64 (D-register) and a .f32
// (S-register) form; the patterns map generic fadd/fsub/fmul/fdiv (and
// fneg-of-fmul for vnmul) DAG nodes onto them.
117 def VADDD : ADbI<0b11100, 0b11, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
118 IIC_fpALU64, "vadd", ".f64\t$dst, $a, $b",
119 [(set DPR:$dst, (fadd DPR:$a, DPR:$b))]>;
121 def VADDS : ASbIn<0b11100, 0b11, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
122 IIC_fpALU32, "vadd", ".f32\t$dst, $a, $b",
123 [(set SPR:$dst, (fadd SPR:$a, SPR:$b))]>;
125 // These are encoded as unary instructions.
// vcmpe compares write the FPSCR condition flags (Defs = [FPSCR]).
// NOTE(review): the closing '}' of this 'let Defs = [FPSCR] in {' region is
// missing from this chunk.
126 let Defs = [FPSCR] in {
127 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins DPR:$a, DPR:$b),
128 IIC_fpCMP64, "vcmpe", ".f64\t$a, $b",
129 [(arm_cmpfp DPR:$a, DPR:$b)]>;
131 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins SPR:$a, SPR:$b),
132 IIC_fpCMP32, "vcmpe", ".f32\t$a, $b",
133 [(arm_cmpfp SPR:$a, SPR:$b)]>;
136 def VDIVD : ADbI<0b11101, 0b00, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
137 IIC_fpDIV64, "vdiv", ".f64\t$dst, $a, $b",
138 [(set DPR:$dst, (fdiv DPR:$a, DPR:$b))]>;
140 def VDIVS : ASbI<0b11101, 0b00, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
141 IIC_fpDIV32, "vdiv", ".f32\t$dst, $a, $b",
142 [(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
144 def VMULD : ADbI<0b11100, 0b10, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
145 IIC_fpMUL64, "vmul", ".f64\t$dst, $a, $b",
146 [(set DPR:$dst, (fmul DPR:$a, DPR:$b))]>;
148 def VMULS : ASbIn<0b11100, 0b10, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
149 IIC_fpMUL32, "vmul", ".f32\t$dst, $a, $b",
150 [(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
// vnmul: negated multiply, dst = -(a * b).
152 def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
153 IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
154 [(set DPR:$dst, (fneg (fmul DPR:$a, DPR:$b)))]>;
156 def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
157 IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
158 [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
160 // Match reassociated forms only if not sign dependent rounding.
// (-a) * b is only the same as -(a * b) when sign-dependent rounding is
// not being honored, hence the predicate on these patterns.
161 def : Pat<(fmul (fneg DPR:$a), DPR:$b),
162 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
163 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
164 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
167 def VSUBD : ADbI<0b11100, 0b11, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
168 IIC_fpALU64, "vsub", ".f64\t$dst, $a, $b",
169 [(set DPR:$dst, (fsub DPR:$a, DPR:$b))]>;
171 def VSUBS : ASbIn<0b11100, 0b11, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
172 IIC_fpALU32, "vsub", ".f32\t$dst, $a, $b",
173 [(set SPR:$dst, (fsub SPR:$a, SPR:$b))]>;
175 //===----------------------------------------------------------------------===//
176 // FP Unary Operations.
// FP unary operations: absolute value, compare-with-zero, f32<->f64
// conversion, register-to-register move, negation, square root.
179 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
180 IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
181 [(set DPR:$dst, (fabs DPR:$a))]>;
183 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
184 IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
185 [(set SPR:$dst, (fabs SPR:$a))]>;
// vcmpe against #0 writes the FPSCR condition flags.
// NOTE(review): the closing '}' of this 'let Defs = [FPSCR] in {' region is
// missing from this chunk.
187 let Defs = [FPSCR] in {
188 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
189 IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
190 [(arm_cmpfp0 DPR:$a)]>;
192 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
193 IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
194 [(arm_cmpfp0 SPR:$a)]>;
// f32 -> f64 extension.
197 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
198 IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
199 [(set DPR:$dst, (fextend SPR:$a))]>;
201 // Special case encoding: bits 11-8 is 0b1011.
// f64 -> f32 rounding; hand-written encoding rather than the ASuI class
// because of the bits 11-8 special case noted above.
// NOTE(review): the closing '}' of this def is missing from this chunk.
202 def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
203 IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
204 [(set SPR:$dst, (fround DPR:$a))]> {
205 let Inst{27-23} = 0b11101;
206 let Inst{21-16} = 0b110111;
207 let Inst{11-8} = 0b1011;
208 let Inst{7-6} = 0b11;
// Plain register moves carry no pattern and are flagged side-effect free.
212 let neverHasSideEffects = 1 in {
213 def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
214 IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
216 def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
217 IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
218 } // neverHasSideEffects
220 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
221 IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
222 [(set DPR:$dst, (fneg DPR:$a))]>;
224 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
225 IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
226 [(set SPR:$dst, (fneg SPR:$a))]>;
228 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
229 IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
230 [(set DPR:$dst, (fsqrt DPR:$a))]>;
232 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
233 IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
234 [(set SPR:$dst, (fsqrt SPR:$a))]>;
236 //===----------------------------------------------------------------------===//
237 // FP <-> GPR Copies. Int <-> FP Conversions.
// Bit-exact moves between the FP and GPR register files.
// Single-register forms are simple bitconverts.
240 def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
241 IIC_VMOVSI, "vmov", "\t$dst, $src",
242 [(set GPR:$dst, (bitconvert SPR:$src))]>;
244 def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
245 IIC_VMOVIS, "vmov", "\t$dst, $src",
246 [(set SPR:$dst, (bitconvert GPR:$src))]>;
// Split a D register into two GPRs. TableGen patterns cannot express
// multi-result instructions, hence the empty pattern (see FIXME).
248 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
249 (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
250 IIC_VMOVDI, "vmov", "\t$wb, $dst2, $src",
251 [/* FIXME: Can't write pattern for multiple result instr*/]> {
252 let Inst{7-6} = 0b00;
255 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
256 (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
257 IIC_VMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
258 [/* For disassembly only; pattern left blank */]> {
259 let Inst{7-6} = 0b00;
// Combine two GPRs into a D register; selected for the arm_fmdrr
// (ARMISD::VMOVDRR) node defined at the top of this file.
265 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
266 (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
267 IIC_VMOVID, "vmov", "\t$dst, $src1, $src2",
268 [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
269 let Inst{7-6} = 0b00;
272 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
273 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
274 IIC_VMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
275 [/* For disassembly only; pattern left blank */]> {
276 let Inst{7-6} = 0b00;
282 // FMRX : SPR system reg -> GPR
286 // FMXR: GPR -> VFP System reg
// Int <-> FP conversions. The integer side always lives in an S register.
// Inst{7} selects signed (1) vs. unsigned (0) for the int->FP forms, per
// the inline comments below.
// NOTE(review): the closing '}' of each def in this section is missing
// from this chunk.
291 def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
292 (outs DPR:$dst), (ins SPR:$a),
293 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
294 [(set DPR:$dst, (arm_sitof SPR:$a))]> {
295 let Inst{7} = 1; // s32
298 def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
299 (outs SPR:$dst),(ins SPR:$a),
300 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
301 [(set SPR:$dst, (arm_sitof SPR:$a))]> {
302 let Inst{7} = 1; // s32
305 def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
306 (outs DPR:$dst), (ins SPR:$a),
307 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
308 [(set DPR:$dst, (arm_uitof SPR:$a))]> {
309 let Inst{7} = 0; // u32
312 def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
313 (outs SPR:$dst), (ins SPR:$a),
314 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
315 [(set SPR:$dst, (arm_uitof SPR:$a))]> {
316 let Inst{7} = 0; // u32
320 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
// FP -> int conversions; Inst{7} is the Z (round-towards-zero) bit here.
322 def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
323 (outs SPR:$dst), (ins DPR:$a),
324 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
325 [(set SPR:$dst, (arm_ftosi DPR:$a))]> {
326 let Inst{7} = 1; // Z bit
329 def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
330 (outs SPR:$dst), (ins SPR:$a),
331 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
332 [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
333 let Inst{7} = 1; // Z bit
336 def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
337 (outs SPR:$dst), (ins DPR:$a),
338 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
339 [(set SPR:$dst, (arm_ftoui DPR:$a))]> {
340 let Inst{7} = 1; // Z bit
343 def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
344 (outs SPR:$dst), (ins SPR:$a),
345 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
346 [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
347 let Inst{7} = 1; // Z bit
350 //===----------------------------------------------------------------------===//
351 // FP FMA Operations.
// FP multiply-accumulate. All forms read and write the accumulator, so
// $dstin is tied to $dst via RegConstraint. Per the patterns:
//   vmla:  dst =  (a * b) + dstin
//   vnmls: dst =  (a * b) - dstin
//   vmls:  dst = -(a * b) + dstin
//   vnmla: dst = -(a * b) - dstin
354 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
355 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
356 IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
357 [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b), DPR:$dstin))]>,
358 RegConstraint<"$dstin = $dst">;
360 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
361 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
362 IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
363 [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
364 RegConstraint<"$dstin = $dst">;
366 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
367 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
368 IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
369 [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b), DPR:$dstin))]>,
370 RegConstraint<"$dstin = $dst">;
372 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
373 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
374 IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
375 [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
376 RegConstraint<"$dstin = $dst">;
378 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
379 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
380 IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
381 [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)), DPR:$dstin))]>,
382 RegConstraint<"$dstin = $dst">;
384 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
385 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
386 IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
387 [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
388 RegConstraint<"$dstin = $dst">;
// dstin - (a * b) is the same computation as vmls; only matched when the
// NEON-for-FP path is not in use.
390 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, DPR:$b)),
391 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
392 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
393 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
395 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
396 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
397 IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
398 [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)), DPR:$dstin))]>,
399 RegConstraint<"$dstin = $dst">;
401 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
402 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
403 IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
404 [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
405 RegConstraint<"$dstin = $dst">;
407 //===----------------------------------------------------------------------===//
408 // FP Conditional moves.
// FP conditional moves/negates. $false is tied to $dst, so when the move
// does not execute the destination keeps the false value. The selection
// patterns are intentionally commented out (these are selected elsewhere).
411 def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
412 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
413 IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
414 [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
415 RegConstraint<"$false = $dst">;
417 def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
418 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
419 IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
420 [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
421 RegConstraint<"$false = $dst">;
423 def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
424 (outs DPR:$dst), (ins DPR:$false, DPR:$true),
425 IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
426 [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
427 RegConstraint<"$false = $dst">;
429 def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
430 (outs SPR:$dst), (ins SPR:$false, SPR:$true),
431 IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
432 [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
433 RegConstraint<"$false = $dst">;
436 //===----------------------------------------------------------------------===//
440 // APSR is the application level alias of CPSR. The FPSCR N, Z, C, V flags
// are transferred to the APSR flags by the FMSTAT instruction below.
// FMSTAT (vmrs apsr_nzcv, fpscr): transfer the FPSCR condition flags to
// the CPSR/APSR (Defs = [CPSR], Uses = [FPSCR]).
442 let Defs = [CPSR], Uses = [FPSCR] in
443 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
444 "\tapsr_nzcv, fpscr",
// NOTE(review): the pattern line and the closing braces of this def are
// missing from this chunk.
446 let Inst{27-20} = 0b11101111;
447 let Inst{19-16} = 0b0001;
448 let Inst{15-12} = 0b1111;
449 let Inst{11-8} = 0b1010;
455 // Materialize FP immediates. VFP3 only.
// FCONSTD/FCONSTS emit "vmov.f64/.f32 $dst, #imm" for constants accepted
// by the vfp_f64imm/vfp_f32imm predicates. Marked rematerializable since
// they depend on no other register values. Both require HasVFP3.
// NOTE(review): several encoding lines and the closing braces of these
// defs are missing from this chunk.
456 let isReMaterializable = 1 in {
457 def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
458 VFPMiscFrm, IIC_VMOVImm,
459 "vmov", ".f64\t$dst, $imm",
460 [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
461 let Inst{27-23} = 0b11101;
462 let Inst{21-20} = 0b11;
463 let Inst{11-9} = 0b101;
465 let Inst{7-4} = 0b0000;
468 def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
469 VFPMiscFrm, IIC_VMOVImm,
470 "vmov", ".f32\t$dst, $imm",
471 [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
472 let Inst{27-23} = 0b11101;
473 let Inst{21-20} = 0b11;
474 let Inst{11-9} = 0b101;
476 let Inst{7-4} = 0b0000;