1 //===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
// SelectionDAG type profiles and custom ISD nodes used by the VFP patterns
// below: float<->int conversions (FTOUI/FTOSI/SITOF/UITOF operate through an
// f32-typed S register), FP compares that set the FP status flags, the
// FPSCR->flags transfer (FMSTAT), and the GPR-pair -> f64 move (VMOVDRR).
// NOTE(review): extraction appears to have dropped source lines in this span
// (e.g. the tail of SDT_VMOVDRR's operand list); verify against upstream
// ARMInstrVFP.td.
14 def SDT_FTOI : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
15 def SDT_ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
16 def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
17 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
20 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
21 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
22 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
23 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// FMSTAT both consumes and produces a flag value (glue in/out).
24 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag, SDNPOutFlag]>;
25 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
26 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutFlag]>;
27 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
30 //===----------------------------------------------------------------------===//
31 // Operand Definitions.
// Operand classes for FP immediates materializable by the VFP3 "vmov #imm"
// forms (FCONSTS/FCONSTD below). The PatLeaf predicates accept only
// constants encodable in the 8-bit VFP immediate format; ARM::getVFPf*Imm
// returns -1 for non-encodable values.
// NOTE(review): the "}]> {" closers of both PatLeaf bodies and the final
// braces appear to have been dropped by extraction; verify against upstream.
34 def vfp_f32imm : Operand<f32>,
35 PatLeaf<(f32 fpimm), [{
36 return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
38 let PrintMethod = "printVFPf32ImmOperand";
41 def vfp_f64imm : Operand<f64>,
42 PatLeaf<(f64 fpimm), [{
43 return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
45 let PrintMethod = "printVFPf64ImmOperand";
49 //===----------------------------------------------------------------------===//
50 // Load / store Instructions.
// Single-register VFP load/store (VLDR/VSTR); addrmode5 is the VFP
// base-plus-scaled-immediate addressing mode. Loads are marked foldable and
// rematerializable so the allocator may re-issue them rather than spill.
53 let canFoldAsLoad = 1, isReMaterializable = 1 in {
55 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
56 IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
57 [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;
59 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
60 IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
61 [(set SPR:$Sd, (load addrmode5:$addr))]>;
63 } // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
// Stores: second opcode field 0b00 selects store (vs. 0b01 for the loads).
65 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
66 IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
67 [(store (f64 DPR:$Dd), addrmode5:$addr)]>;
69 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
70 IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
71 [(store SPR:$Sd, addrmode5:$addr)]>;
73 //===----------------------------------------------------------------------===//
74 // Load / store multiple Instructions.
// Multiclass generating the double-precision load/store-multiple variants:
// increment-after (IA) and decrement-before (DB) addressing, each with and
// without base-register writeback ("$Rn = $wb" ties the updated base to the
// $wb output). Bits 24-23 select the addressing mode, bit 21 the writeback,
// and bit 20 (L_bit) load vs. store.
// NOTE(review): the inner "def ... :" name lines and some closing braces /
// Inst{20} assignments appear to have been dropped by extraction; verify
// against upstream ARMInstrVFP.td.
77 multiclass vfp_ldst_d_mult<string asm, bit L_bit,
78 InstrItinClass itin, InstrItinClass itin_upd> {
80 AXDI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
82 !strconcat(asm, "${p}\t$Rn, $regs"), "", []> {
83 let Inst{24-23} = 0b01; // Increment After
84 let Inst{21} = 0; // No writeback
88 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
89 IndexModeUpd, itin_upd,
90 !strconcat(asm, "${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
91 let Inst{24-23} = 0b01; // Increment After
92 let Inst{21} = 1; // Writeback
96 AXDI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
98 !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
99 let Inst{24-23} = 0b10; // Decrement Before
100 let Inst{21} = 0; // No writeback
101 let Inst{20} = L_bit;
104 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
105 IndexModeUpd, itin_upd,
106 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
107 let Inst{24-23} = 0b10; // Decrement Before
108 let Inst{21} = 1; // Writeback
109 let Inst{20} = L_bit;
// Single-precision counterpart of vfp_ldst_d_mult above: generates the IA and
// DB load/store-multiple variants, with and without base writeback, over an
// S-register list. Same bit layout: Inst{24-23} addressing mode, Inst{21}
// writeback, Inst{20} = L_bit (load vs. store).
// NOTE(review): the inner "def ... :" name lines and some closing braces
// appear to have been dropped by extraction; verify against upstream.
113 multiclass vfp_ldst_s_mult<string asm, bit L_bit,
114 InstrItinClass itin, InstrItinClass itin_upd> {
116 AXSI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
118 !strconcat(asm, "${p}\t$Rn, $regs"), "", []> {
119 let Inst{24-23} = 0b01; // Increment After
120 let Inst{21} = 0; // No writeback
121 let Inst{20} = L_bit;
124 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
125 IndexModeUpd, itin_upd,
126 !strconcat(asm, "${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
127 let Inst{24-23} = 0b01; // Increment After
128 let Inst{21} = 1; // Writeback
129 let Inst{20} = L_bit;
132 AXSI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
134 !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
135 let Inst{24-23} = 0b10; // Decrement Before
136 let Inst{21} = 0; // No writeback
137 let Inst{20} = L_bit;
140 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
141 IndexModeUpd, itin_upd,
142 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
143 let Inst{24-23} = 0b10; // Decrement Before
144 let Inst{21} = 1; // Writeback
145 let Inst{20} = L_bit;
// VLDM: load-multiple into D/S register lists. hasExtraDefRegAllocReq tells
// the register allocator that the variable reglist operands are additional
// defs beyond the declared outs; the _UPD variants also write the updated
// base address back through $wb.
// NOTE(review): extraction appears to have dropped lines here (e.g. the
// "$Rn = $wb" constraint lines of the _UPD defs and some closing braces);
// verify against upstream ARMInstrVFP.td.
149 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
150 isCodeGenOnly = 1 in {
151 def VLDMD : AXDI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
152 reglist:$dsts, variable_ops),
153 IndexModeNone, IIC_fpLoad_m,
154 "vldm${amode}${p}\t$Rn, $dsts", "", []> {
155 let Inst{21} = 0; // wback = (W == '1')
156 let Inst{20} = 1; // Load
159 def VLDMS : AXSI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
160 reglist:$dsts, variable_ops),
161 IndexModeNone, IIC_fpLoad_m,
162 "vldm${amode}${p}\t$Rn, $dsts", "", []> {
163 let Inst{21} = 0; // wback = (W == '1')
164 let Inst{20} = 1; // Load
167 def VLDMD_UPD : AXDI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
168 reglist:$dsts, variable_ops),
169 IndexModeUpd, IIC_fpLoad_mu,
170 "vldm${amode}${p}\t$Rn!, $dsts",
172 let Inst{21} = 1; // wback = (W == '1')
173 let Inst{20} = 1; // Load
176 def VLDMS_UPD : AXSI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
177 reglist:$dsts, variable_ops),
178 IndexModeUpd, IIC_fpLoad_mu,
179 "vldm${amode}${p}\t$Rn!, $dsts",
181 let Inst{21} = 1; // wback = (W == '1')
182 let Inst{20} = 1; // Load
184 } // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
// VSTM: store-multiple from D/S register lists; mirror image of the VLDM
// defs above with Inst{20} = 0 (store) and hasExtraSrcRegAllocReq marking
// the variable reglist operands as additional uses.
// NOTE(review): extraction appears to have dropped lines here (e.g. the
// "$Rn = $wb" constraint lines of the _UPD defs and some closing braces);
// verify against upstream ARMInstrVFP.td.
186 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1,
187 isCodeGenOnly = 1 in {
188 def VSTMD : AXDI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
189 reglist:$srcs, variable_ops),
190 IndexModeNone, IIC_fpStore_m,
191 "vstm${amode}${p}\t$Rn, $srcs", "", []> {
192 let Inst{21} = 0; // wback = (W == '1')
193 let Inst{20} = 0; // Store
196 def VSTMS : AXSI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
197 reglist:$srcs, variable_ops), IndexModeNone,
199 "vstm${amode}${p}\t$Rn, $srcs", "", []> {
200 let Inst{21} = 0; // wback = (W == '1')
201 let Inst{20} = 0; // Store
204 def VSTMD_UPD : AXDI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
205 reglist:$srcs, variable_ops),
206 IndexModeUpd, IIC_fpStore_mu,
207 "vstm${amode}${p}\t$Rn!, $srcs",
209 let Inst{21} = 1; // wback = (W == '1')
210 let Inst{20} = 0; // Store
213 def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
214 reglist:$srcs, variable_ops),
215 IndexModeUpd, IIC_fpStore_mu,
216 "vstm${amode}${p}\t$Rn!, $srcs",
218 let Inst{21} = 1; // wback = (W == '1')
219 let Inst{20} = 0; // Store
221 } // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
223 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
225 //===----------------------------------------------------------------------===//
226 // FP Binary Operations.
// FP binary arithmetic: add, subtract, divide, multiply, and negated
// multiply, each in a double-precision (ADbI, DPR operands) and a
// single-precision (ASbI/ASbIn, SPR operands) form. Patterns select
// directly on the generic fadd/fsub/fdiv/fmul/fneg DAG nodes.
229 def VADDD : ADbI<0b11100, 0b11, 0, 0,
230 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
231 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
232 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
234 def VADDS : ASbIn<0b11100, 0b11, 0, 0,
235 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
236 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
237 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>;
239 def VSUBD : ADbI<0b11100, 0b11, 1, 0,
240 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
241 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
242 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
244 def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
245 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
246 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
247 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>;
249 def VDIVD : ADbI<0b11101, 0b00, 0, 0,
250 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
251 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
252 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;
254 def VDIVS : ASbI<0b11101, 0b00, 0, 0,
255 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
256 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
257 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
259 def VMULD : ADbI<0b11100, 0b10, 0, 0,
260 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
261 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
262 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;
264 def VMULS : ASbIn<0b11100, 0b10, 0, 0,
265 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
266 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
267 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>;
// VNMUL computes -(a*b); the primary patterns match (fneg (fmul ...)).
269 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
270 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
271 IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
272 [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;
274 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
275 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
276 IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
277 [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>;
279 // Match reassociated forms only if not sign dependent rounding.
// (fneg a) * b == -(a*b) only when sign-dependent rounding is not honored.
280 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
281 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
282 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
283 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
285 // These are encoded as unary instructions.
// Register-register FP compares, encoded as unary instructions; all write
// the FP status flags (Defs = [FPSCR]). The "vcmpe" forms (opcod field
// 0b11) and plain "vcmp" forms (0b01) differ only in that encoding bit
// here; the plain forms are disassembly-only (no selection pattern).
// NOTE(review): the closing '}' of this let-block appears to have been
// dropped by extraction; verify against upstream ARMInstrVFP.td.
286 let Defs = [FPSCR] in {
287 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
288 (outs), (ins DPR:$Dd, DPR:$Dm),
289 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
290 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;
292 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
293 (outs), (ins SPR:$Sd, SPR:$Sm),
294 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
295 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]>;
297 // FIXME: Verify encoding after integrated assembler is working.
298 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
299 (outs), (ins DPR:$Dd, DPR:$Dm),
300 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
301 [/* For disassembly only; pattern left blank */]>;
303 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
304 (outs), (ins SPR:$Sd, SPR:$Sm),
305 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
306 [/* For disassembly only; pattern left blank */]>;
309 //===----------------------------------------------------------------------===//
310 // FP Unary Operations.
// Absolute value, plus compare-against-zero forms. The compare-with-zero
// instructions take a single register operand and hardwire the Vm field
// (Inst{3-0}) to zero; all compares write FPSCR.
// NOTE(review): some closing braces in this span appear to have been
// dropped by extraction; verify against upstream ARMInstrVFP.td.
313 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
314 (outs DPR:$Dd), (ins DPR:$Dm),
315 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
316 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
318 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
319 (outs SPR:$Sd), (ins SPR:$Sm),
320 IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
321 [(set SPR:$Sd, (fabs SPR:$Sm))]>;
323 let Defs = [FPSCR] in {
324 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
325 (outs), (ins DPR:$Dd),
326 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
327 [(arm_cmpfp0 (f64 DPR:$Dd))]> {
328 let Inst{3-0} = 0b0000;
332 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
333 (outs), (ins SPR:$Sd),
334 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
335 [(arm_cmpfp0 SPR:$Sd)]> {
336 let Inst{3-0} = 0b0000;
// Plain (non-'E') compare-with-zero forms: disassembly only, no pattern.
340 // FIXME: Verify encoding after integrated assembler is working.
341 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
342 (outs), (ins DPR:$Dd),
343 IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
344 [/* For disassembly only; pattern left blank */]> {
345 let Inst{3-0} = 0b0000;
349 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
350 (outs), (ins SPR:$Sd),
351 IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
352 [/* For disassembly only; pattern left blank */]> {
353 let Inst{3-0} = 0b0000;
// Precision conversions: VCVTDS extends f32 -> f64 (fextend), VCVTSD
// rounds f64 -> f32 (fround). VCVTSD cannot use the usual unary-D class
// because bits 11-8 must be 0b1011 while the destination is an S register,
// so its full encoding is spelled out inline.
// NOTE(review): the "bits<5> Sm;"/"bits<5> Dd;"-style operand declaration
// lines and some Inst bit assignments appear to have been dropped by
// extraction; verify against upstream ARMInstrVFP.td.
358 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
359 (outs DPR:$Dd), (ins SPR:$Sm),
360 IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
361 [(set DPR:$Dd, (fextend SPR:$Sm))]> {
362 // Instruction operands.
366 // Encode instruction operands.
367 let Inst{3-0} = Sm{4-1};
369 let Inst{15-12} = Dd{3-0};
370 let Inst{22} = Dd{4};
373 // Special case encoding: bits 11-8 is 0b1011.
374 def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
375 IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
376 [(set SPR:$Sd, (fround DPR:$Dm))]> {
377 // Instruction operands.
381 // Encode instruction operands.
382 let Inst{3-0} = Dm{3-0};
384 let Inst{15-12} = Sd{4-1};
385 let Inst{22} = Sd{0};
387 let Inst{27-23} = 0b11101;
388 let Inst{21-16} = 0b110111;
389 let Inst{11-8} = 0b1011;
390 let Inst{7-6} = 0b11;
394 // Between half-precision and single-precision. For disassembly only.
396 // FIXME: Verify encoding after integrated assembler is working.
// Half <-> single precision conversions (VCVTB operates on the bottom
// half-word, VCVTT on the top). The instructions themselves are
// disassembly-only; f16 codegen goes through the f32_to_f16 / f16_to_f32
// patterns below, moving the i32-typed half value via a GPR<->SPR copy.
397 def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
398 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
399 [/* For disassembly only; pattern left blank */]>;
401 def : ARMPat<(f32_to_f16 SPR:$a),
402 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
404 def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
405 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
406 [/* For disassembly only; pattern left blank */]>;
408 def : ARMPat<(f16_to_f32 GPR:$a),
409 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
// Top-half variants: disassembly only, no selection patterns.
411 def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
412 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
413 [/* For disassembly only; pattern left blank */]>;
415 def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
416 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
417 [/* For disassembly only; pattern left blank */]>;
// Remaining unary ops: negate, square root, and register-to-register moves.
// The plain VMOV copies have no pattern and are marked side-effect free so
// the coalescer may freely eliminate them.
419 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
420 (outs DPR:$Dd), (ins DPR:$Dm),
421 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
422 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
424 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
425 (outs SPR:$Sd), (ins SPR:$Sm),
426 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
427 [(set SPR:$Sd, (fneg SPR:$Sm))]>;
429 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
430 (outs DPR:$Dd), (ins DPR:$Dm),
431 IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
432 [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;
434 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
435 (outs SPR:$Sd), (ins SPR:$Sm),
436 IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
437 [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;
439 let neverHasSideEffects = 1 in {
440 def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
441 (outs DPR:$Dd), (ins DPR:$Dm),
442 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;
444 def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
445 (outs SPR:$Sd), (ins SPR:$Sm),
446 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
447 } // neverHasSideEffects
449 //===----------------------------------------------------------------------===//
450 // FP <-> GPR Copies. Int <-> FP Conversions.
// FP <-> GPR register moves. VMOVRS/VMOVSR bitcast between a single S
// register and a GPR; VMOVRRD/VMOVDRR move a D register to/from a GPR pair
// (VMOVDRR selects on the arm_fmdrr node defined above); the S-pair forms
// are disassembly only.
// NOTE(review): the operand bit-field declaration lines ("bits<4> Rt;" etc.)
// and several closing braces / Inst assignments appear to have been dropped
// by extraction; verify against upstream ARMInstrVFP.td.
453 def VMOVRS : AVConv2I<0b11100001, 0b1010,
454 (outs GPR:$Rt), (ins SPR:$Sn),
455 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
456 [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
457 // Instruction operands.
461 // Encode instruction operands.
462 let Inst{19-16} = Sn{4-1};
464 let Inst{15-12} = Rt;
466 let Inst{6-5} = 0b00;
467 let Inst{3-0} = 0b0000;
470 def VMOVSR : AVConv4I<0b11100000, 0b1010,
471 (outs SPR:$Sn), (ins GPR:$Rt),
472 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
473 [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
474 // Instruction operands.
478 // Encode instruction operands.
479 let Inst{19-16} = Sn{4-1};
481 let Inst{15-12} = Rt;
483 let Inst{6-5} = 0b00;
484 let Inst{3-0} = 0b0000;
// Two-result moves cannot be expressed as selection patterns, so these are
// created by custom lowering / pseudo expansion instead.
487 let neverHasSideEffects = 1 in {
488 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
489 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
490 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
491 [/* FIXME: Can't write pattern for multiple result instr*/]> {
492 // Instruction operands.
497 // Encode instruction operands.
498 let Inst{3-0} = Dm{3-0};
500 let Inst{15-12} = Rt;
501 let Inst{19-16} = Rt2;
503 let Inst{7-6} = 0b00;
506 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
507 (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
508 IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
509 [/* For disassembly only; pattern left blank */]> {
510 let Inst{7-6} = 0b00;
512 } // neverHasSideEffects
517 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
518 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
519 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
520 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
521 // Instruction operands.
526 // Encode instruction operands.
527 let Inst{3-0} = Dm{3-0};
529 let Inst{15-12} = Rt;
530 let Inst{19-16} = Rt2;
532 let Inst{7-6} = 0b00;
535 let neverHasSideEffects = 1 in
536 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
537 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
538 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
539 [/* For disassembly only; pattern left blank */]> {
540 let Inst{7-6} = 0b00;
546 // FMRX: SPR system reg -> GPR
548 // FMXR: GPR -> VFP system reg
// Int -> FP conversions. The two encode classes factor out the operand
// encoding shared by the defs: AVConv1IDs_Encode for S-source/D-dest,
// AVConv1InSs_Encode for S-source/S-dest. The integer source lives in an S
// register (see SDT_ITOF above); Inst{7} selects signed (1) vs. unsigned (0).
// NOTE(review): operand bit-field declaration lines and some closing braces
// appear to have been dropped by extraction; verify against upstream.
553 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
554 bits<4> opcod4, dag oops, dag iops,
555 InstrItinClass itin, string opc, string asm,
557 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
559 // Instruction operands.
563 // Encode instruction operands.
564 let Inst{3-0} = Sm{4-1};
566 let Inst{15-12} = Dd{3-0};
567 let Inst{22} = Dd{4};
570 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
571 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
572 string opc, string asm, list<dag> pattern>
573 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
575 // Instruction operands.
579 // Encode instruction operands.
580 let Inst{3-0} = Sm{4-1};
582 let Inst{15-12} = Sd{4-1};
583 let Inst{22} = Sd{0};
586 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
587 (outs DPR:$Dd), (ins SPR:$Sm),
588 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
589 [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
590 let Inst{7} = 1; // s32
593 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
594 (outs SPR:$Sd),(ins SPR:$Sm),
595 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
596 [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
597 let Inst{7} = 1; // s32
600 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
601 (outs DPR:$Dd), (ins SPR:$Sm),
602 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
603 [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
604 let Inst{7} = 0; // u32
607 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
608 (outs SPR:$Sd), (ins SPR:$Sm),
609 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
610 [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
611 let Inst{7} = 0; // u32
// FP -> int conversions, round-towards-zero variants. Encode classes mirror
// the int->FP ones above: AVConv1IsD_Encode for D-source/S-dest,
// AVConv1InsS_Encode for S-source/S-dest. The integer result lives in an S
// register (see SDT_FTOI above); Inst{7} = 1 forces round-towards-zero.
// NOTE(review): operand bit-field declaration lines and some closing braces
// appear to have been dropped by extraction; verify against upstream.
616 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
617 bits<4> opcod4, dag oops, dag iops,
618 InstrItinClass itin, string opc, string asm,
620 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
622 // Instruction operands.
626 // Encode instruction operands.
627 let Inst{3-0} = Dm{3-0};
629 let Inst{15-12} = Sd{4-1};
630 let Inst{22} = Sd{0};
633 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
634 bits<4> opcod4, dag oops, dag iops,
635 InstrItinClass itin, string opc, string asm,
637 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
639 // Instruction operands.
643 // Encode instruction operands.
644 let Inst{3-0} = Sm{4-1};
646 let Inst{15-12} = Sd{4-1};
647 let Inst{22} = Sd{0};
650 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
651 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
652 (outs SPR:$Sd), (ins DPR:$Dm),
653 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
654 [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
655 let Inst{7} = 1; // Z bit
658 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
659 (outs SPR:$Sd), (ins SPR:$Sm),
660 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
661 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
662 let Inst{7} = 1; // Z bit
665 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
666 (outs SPR:$Sd), (ins DPR:$Dm),
667 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
668 [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
669 let Inst{7} = 1; // Z bit
672 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
673 (outs SPR:$Sd), (ins SPR:$Sm),
674 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
675 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
676 let Inst{7} = 1; // Z bit
679 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
680 // For disassembly only.
// FP -> int conversions with Inst{7} (Z bit) clear: these round using the
// mode currently selected in FPSCR (hence Uses = [FPSCR]) and are selected
// only from the explicit int_arm_vcvtr / int_arm_vcvtru intrinsics.
// NOTE(review): some closing braces in this span appear to have been
// dropped by extraction; verify against upstream ARMInstrVFP.td.
681 let Uses = [FPSCR] in {
682 // FIXME: Verify encoding after integrated assembler is working.
683 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
684 (outs SPR:$Sd), (ins DPR:$Dm),
685 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
686 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>{
687 let Inst{7} = 0; // Z bit
690 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
691 (outs SPR:$Sd), (ins SPR:$Sm),
692 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
693 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
694 let Inst{7} = 0; // Z bit
697 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
698 (outs SPR:$Sd), (ins DPR:$Dm),
699 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
700 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>{
701 let Inst{7} = 0; // Z bit
704 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
705 (outs SPR:$Sd), (ins SPR:$Sm),
706 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
707 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
708 let Inst{7} = 0; // Z bit
712 // Convert between floating-point and fixed-point
713 // Data type for fixed-point naming convention:
714 // S16 (U=0, sx=0) -> SH
715 // U16 (U=1, sx=0) -> UH
716 // S32 (U=0, sx=1) -> SL
717 // U32 (U=1, sx=1) -> UL
719 // FIXME: Marking these as codegen only seems wrong. They are real
// Fixed-point <-> FP conversions (disassembly only). Naming: direction
// (VTO*/V*TO) + data type (SH/UH = 16-bit, SL/UL = 32-bit, signed/unsigned)
// + precision (S/D). The destination is constrained to the source register
// ("$a = $dst") because the hardware converts in place; $fbits is the
// number of fraction bits.
721 let Constraints = "$a = $dst", isCodeGenOnly = 1 in {
723 // FP to Fixed-Point:
725 def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
726 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
727 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
728 [/* For disassembly only; pattern left blank */]>;
730 def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
731 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
732 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
733 [/* For disassembly only; pattern left blank */]>;
735 def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
736 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
737 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
738 [/* For disassembly only; pattern left blank */]>;
740 def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
741 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
742 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
743 [/* For disassembly only; pattern left blank */]>;
745 def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
746 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
747 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
748 [/* For disassembly only; pattern left blank */]>;
750 def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
751 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
752 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
753 [/* For disassembly only; pattern left blank */]>;
755 def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
756 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
757 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
758 [/* For disassembly only; pattern left blank */]>;
760 def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
761 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
762 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
763 [/* For disassembly only; pattern left blank */]>;
765 // Fixed-Point to FP:
767 def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
768 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
769 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
770 [/* For disassembly only; pattern left blank */]>;
772 def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
773 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
774 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
775 [/* For disassembly only; pattern left blank */]>;
777 def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
778 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
779 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
780 [/* For disassembly only; pattern left blank */]>;
782 def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
783 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
784 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
785 [/* For disassembly only; pattern left blank */]>;
787 def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
788 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
789 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
790 [/* For disassembly only; pattern left blank */]>;
792 def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
793 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
794 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
795 [/* For disassembly only; pattern left blank */]>;
797 def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
798 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
799 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
800 [/* For disassembly only; pattern left blank */]>;
802 def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
803 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
804 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
805 [/* For disassembly only; pattern left blank */]>;
807 } // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'
809 //===----------------------------------------------------------------------===//
810 // FP FMA Operations.
// Multiply-accumulate family. These are two-address: the accumulator input
// ($Ddin/$Sdin) is tied to the destination via RegConstraint. VMLx use is
// gated behind UseVMLx (and DontUseNEONForFP for the f32 forms). Each def
// matches the mul-first operand order; the trailing anonymous Pat handles
// the commuted / alternative DAG shape.
// NOTE(review): the accumulator tails of several patterns (e.g.
// "DPR:$Ddin))]>," lines) appear to have been dropped by extraction; verify
// against upstream ARMInstrVFP.td.
813 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
814 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
815 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
816 [(set DPR:$Dd, (fadd (fmul DPR:$Dn, DPR:$Dm),
818 RegConstraint<"$Ddin = $Dd">,
819 Requires<[HasVFP2,UseVMLx]>;
821 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
822 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
823 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
824 [(set SPR:$Sd, (fadd (fmul SPR:$Sn, SPR:$Sm),
826 RegConstraint<"$Sdin = $Sd">,
827 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
829 def : Pat<(fadd DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
830 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
831 Requires<[HasVFP2,UseVMLx]>;
832 def : Pat<(fadd SPR:$dstin, (fmul SPR:$a, SPR:$b)),
833 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
834 Requires<[HasVFP2,DontUseNEONForFP, UseVMLx]>;
// VMLS: acc - (a*b), written as acc + (-(a*b)).
836 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
837 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
838 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
839 [(set DPR:$Dd, (fadd (fneg (fmul DPR:$Dn,DPR:$Dm)),
841 RegConstraint<"$Ddin = $Dd">,
842 Requires<[HasVFP2,UseVMLx]>;
844 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
845 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
846 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
847 [(set SPR:$Sd, (fadd (fneg (fmul SPR:$Sn, SPR:$Sm)),
849 RegConstraint<"$Sdin = $Sd">,
850 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
852 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
853 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
854 Requires<[HasVFP2,UseVMLx]>;
855 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
856 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
857 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
// VNMLA: -(a*b) - acc.
859 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
860 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
861 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
862 [(set DPR:$Dd,(fsub (fneg (fmul DPR:$Dn,DPR:$Dm)),
864 RegConstraint<"$Ddin = $Dd">,
865 Requires<[HasVFP2,UseVMLx]>;
867 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
868 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
869 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
870 [(set SPR:$Sd, (fsub (fneg (fmul SPR:$Sn, SPR:$Sm)),
872 RegConstraint<"$Sdin = $Sd">,
873 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
875 def : Pat<(fsub (fneg (fmul DPR:$a, (f64 DPR:$b))), DPR:$dstin),
876 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
877 Requires<[HasVFP2,UseVMLx]>;
878 def : Pat<(fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin),
879 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
880 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
// VNMLS: (a*b) - acc.
882 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
883 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
884 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
885 [(set DPR:$Dd, (fsub (fmul DPR:$Dn, DPR:$Dm),
887 RegConstraint<"$Ddin = $Dd">,
888 Requires<[HasVFP2,UseVMLx]>;
890 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
891 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
892 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
893 [(set SPR:$Sd, (fsub (fmul SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
894 RegConstraint<"$Sdin = $Sd">,
895 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
897 def : Pat<(fsub (fmul DPR:$a, (f64 DPR:$b)), DPR:$dstin),
898 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
899 Requires<[HasVFP2,UseVMLx]>;
900 def : Pat<(fsub (fmul SPR:$a, SPR:$b), SPR:$dstin),
901 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
902 Requires<[HasVFP2,DontUseNEONForFP,UseVMLx]>;
905 //===----------------------------------------------------------------------===//
906 // FP Conditional moves.
// Predicated (conditional) move/negate pseudo-patterns: same encodings as
// VMOV/VNEG above, but with the false-value input tied to the destination
// ("$Dn = $Dd" / "$Sn = $Sd") so the instruction only overwrites it when
// its predicate fires. Patterns are intentionally commented out; these are
// created by custom cmov lowering instead.
909 let neverHasSideEffects = 1 in {
910 def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
911 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
912 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm",
913 [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
914 RegConstraint<"$Dn = $Dd">;
916 def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
917 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
918 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm",
919 [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
920 RegConstraint<"$Sn = $Sd">;
922 def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
923 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
924 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
925 [/*(set DPR:$Dd, (ARMcneg DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
926 RegConstraint<"$Dn = $Dd">;
928 def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
929 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
930 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
931 [/*(set SPR:$Sd, (ARMcneg SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
932 RegConstraint<"$Sn = $Sd">;
933 } // neverHasSideEffects
935 //===----------------------------------------------------------------------===//
939 // APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags
// FPSCR transfer instructions.
// FMSTAT ("vmrs apsr_nzcv, fpscr") copies the FPSCR condition flags into
// CPSR so integer conditional execution can use FP compare results.
// VMRS / VMSR read / write the whole FPSCR via the get/set_fpscr
// intrinsics; VMRS is marked hasSideEffects because reading FPSCR observes
// state (e.g. flags set by earlier compares) that the DAG cannot model.
// NOTE(review): operand bit-field declaration lines and some closing braces
// appear to have been dropped by extraction; verify against upstream.
941 let Defs = [CPSR], Uses = [FPSCR] in
942 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT,
943 "vmrs", "\tapsr_nzcv, fpscr",
945 let Inst{27-20} = 0b11101111;
946 let Inst{19-16} = 0b0001;
947 let Inst{15-12} = 0b1111;
948 let Inst{11-8} = 0b1010;
950 let Inst{6-5} = 0b00;
952 let Inst{3-0} = 0b0000;
956 let hasSideEffects = 1, Uses = [FPSCR] in
957 def VMRS : VFPAI<(outs GPR:$Rt), (ins), VFPMiscFrm, IIC_fpSTAT,
958 "vmrs", "\t$Rt, fpscr",
959 [(set GPR:$Rt, (int_arm_get_fpscr))]> {
960 // Instruction operand.
963 // Encode instruction operand.
964 let Inst{15-12} = Rt;
966 let Inst{27-20} = 0b11101111;
967 let Inst{19-16} = 0b0001;
968 let Inst{11-8} = 0b1010;
970 let Inst{6-5} = 0b00;
972 let Inst{3-0} = 0b0000;
975 let Defs = [FPSCR] in
976 def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT,
977 "vmsr", "\tfpscr, $src",
978 [(int_arm_set_fpscr GPR:$src)]> {
979 // Instruction operand.
982 // Encode instruction operand.
983 let Inst{15-12} = src;
985 let Inst{27-20} = 0b11101110;
986 let Inst{19-16} = 0b0001;
987 let Inst{11-8} = 0b1010;
992 // Materialize FP immediates. VFP3 only.
// FCONSTD / FCONSTS: materialize an encodable FP immediate with the VFP3
// "vmov #imm" form (see the vfp_f64imm / vfp_f32imm operand predicates).
// Rematerializable: cheaper to re-emit than to spill/reload. The 8-bit
// immediate field is split across Inst{19}/Inst{18-16}/Inst{3-0}, drawn
// from the constant's double-format bit positions.
// NOTE(review): operand bit-field declaration lines and trailing closing
// braces (the FCONSTS def appears to continue past this excerpt) were
// dropped by extraction; verify against upstream ARMInstrVFP.td.
993 let isReMaterializable = 1 in {
994 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
995 VFPMiscFrm, IIC_fpUNA64,
996 "vmov", ".f64\t$Dd, $imm",
997 [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
998 // Instruction operands.
1002 // Encode instruction operands.
1003 let Inst{15-12} = Dd{3-0};
1004 let Inst{22} = Dd{4};
1005 let Inst{19} = imm{31};
1006 let Inst{18-16} = imm{22-20};
1007 let Inst{3-0} = imm{19-16};
1009 // Encode remaining instruction bits.
1010 let Inst{27-23} = 0b11101;
1011 let Inst{21-20} = 0b11;
1012 let Inst{11-9} = 0b101;
1013 let Inst{8} = 1; // Double precision.
1014 let Inst{7-4} = 0b0000;
1017 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
1018 VFPMiscFrm, IIC_fpUNA32,
1019 "vmov", ".f32\t$Sd, $imm",
1020 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
1021 // Instruction operands.
1025 // Encode instruction operands.
1026 let Inst{15-12} = Sd{4-1};
1027 let Inst{22} = Sd{0};
1028 let Inst{19} = imm{31}; // The immediate is handled as a double.
1029 let Inst{18-16} = imm{22-20};
1030 let Inst{3-0} = imm{19-16};
1032 // Encode remaining instruction bits.
1033 let Inst{27-23} = 0b11101;
1034 let Inst{21-20} = 0b11;
1035 let Inst{11-9} = 0b101;
1036 let Inst{8} = 0; // Single precision.
1037 let Inst{7-4} = 0b0000;