//===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_FTOI    : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDT_ITOF    : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisVT<2, i32>]>;

def arm_ftoui  : SDNode<"ARMISD::FTOUI",   SDT_FTOI>;
def arm_ftosi  : SDNode<"ARMISD::FTOSI",   SDT_FTOI>;
def arm_sitof  : SDNode<"ARMISD::SITOF",   SDT_ITOF>;
def arm_uitof  : SDNode<"ARMISD::UITOF",   SDT_ITOF>;
def arm_fmstat : SDNode<"ARMISD::FMSTAT",  SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP",   SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;

//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
    }]> {
  let PrintMethod = "printVFPf32ImmOperand";
}

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
    }]> {
  let PrintMethod = "printVFPf64ImmOperand";
}
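
// Note: ARM::getVFPf32Imm / getVFPf64Imm return the 8-bit VFP3 modified
// immediate encoding of the constant, or -1 if it is not encodable.  Roughly,
// VMOV (floating-point immediate) can only materialize values of the form
// +/- n * 2^-r with 16 <= n <= 31 and 0 <= r <= 7 (e.g. 1.0, -0.5, 31.0);
// anything else has to come from a constant pool or an integer register.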

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//===----------------------------------------------------------------------===//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
                 [(set SPR:$Sd, (load addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
                 [(store (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
                 [(store SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//===----------------------------------------------------------------------===//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {

  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}

let neverHasSideEffects = 1 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpLoad_m, IIC_fpLoad_mu>;

} // neverHasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;
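
// With the aliases above, assembly written with the bare mnemonics, e.g.
//   vldm  r0, {d0-d3}
//   vstm  r1!, {s0-s7}
// is accepted and treated as the increment-after forms vldmia/vstmia.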

// FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//===----------------------------------------------------------------------===//

def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;

def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;

def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;

def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;

def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;

def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
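
// Rationale for the predicate: VNMUL computes -(a*b), so the patterns above
// rewrite (-a)*b as -(a*b).  The two are identical under round-to-nearest,
// but may differ under directed rounding (round toward +/- infinity), where
// negating before or after the rounded multiply can change the result.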

// These are encoded as unary instructions.
let Defs = [FPSCR] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                  [/* For disassembly only; pattern left blank */]>;

def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//===----------------------------------------------------------------------===//

def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let Defs = [FPSCR] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR]

def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                   [(set DPR:$Dd, (fextend SPR:$Sm))]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                    [(set SPR:$Sd, (fround DPR:$Dm))]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;
}

// Between half-precision and single-precision. For disassembly only.

// FIXME: Verify encoding after integrated assembler is working.
def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
                  [/* For disassembly only; pattern left blank */]>;

def : ARMPat<(f32_to_f16 SPR:$a),
             (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
                  [/* For disassembly only; pattern left blank */]>;

def : ARMPat<(f16_to_f32 GPR:$a),
             (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
                  [/* For disassembly only; pattern left blank */]>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
                  [/* For disassembly only; pattern left blank */]>;
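
// Note: the B/T suffix selects which half of the single-precision register
// holds the f16 value: VCVTB uses the bottom 16 bits, VCVTT the top 16 bits.
// Only the bottom-half forms are used by the f32_to_f16/f16_to_f32 patterns
// above; the top-half forms exist here for the disassembler.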

def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;

let neverHasSideEffects = 1 in {
def VMOVD  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//===----------------------------------------------------------------------===//

def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;
}

def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;
}

let neverHasSideEffects = 1 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
                       (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                       [/* FIXME: Can't write pattern for multiple result instr*/]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;
}

def VMOVRRS : AVConv3I<0b11000101, 0b1010,
                       (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
                       IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  let Inst{7-6} = 0b00;
}
} // neverHasSideEffects

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6}   = 0b00;
}

let neverHasSideEffects = 1 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  let Inst{7-6} = 0b00;
}

// FMRX: SPR system reg -> GPR
// FMXR: GPR -> VFP system reg

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops, InstrItinClass itin,
                         string opc, string asm, list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
  let Inst{7} = 1; // s32
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
  let Inst{7} = 0; // u32
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dm;
  bits<5> Sd;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
// FIXME: Verify encoding after integrated assembler is working.
def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]> {
  let Inst{7} = 0; // Z bit
}

def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}
} // Uses = [FPSCR]

// Convert between floating-point and fixed-point
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL
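//
// For example, assuming a signed Q15.16 layout (16 fractional bits), the
// assembly
//   vcvt.f32.s32  s0, s0, #16    @ fixed -> float  (VSLTOS: U=0, sx=1)
//   vcvt.s32.f32  s0, s0, #16    @ float -> fixed  (VTOSLS: U=0, sx=1)
// converts between the fixed-point and floating-point views of s0 in place.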
// FIXME: Marking these as codegen only seems wrong. They are real instructions.
let Constraints = "$a = $dst", isCodeGenOnly = 1 in {

// FP to Fixed-Point:

def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

// Fixed-Point to FP:

def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

} // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'

//===----------------------------------------------------------------------===//
// FP FMA Operations.
//===----------------------------------------------------------------------===//

def VMLAD : ADbI<0b11100, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
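
// Note on the predicates: UseFPVMLx restricts these multiply-accumulate
// patterns to subtargets where VFP VMLA/VMLS is considered profitable; on
// cores where chained VMLA/VMLS stalls the pipeline, the backend instead
// prefers a separate VMUL + VADD/VSUB sequence.  DontUseNEONForFP limits the
// single-precision patterns to configurations that do scalar f32 arithmetic
// in the VFP unit rather than in NEON.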

def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,UseFPVMLx]>;

def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,UseFPVMLx]>;

def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

//===----------------------------------------------------------------------===//
// FP Conditional moves.
//===----------------------------------------------------------------------===//

let neverHasSideEffects = 1 in {
def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm",
                   [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
                RegConstraint<"$Dn = $Dd">;

def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm",
                   [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
                RegConstraint<"$Sn = $Sd">;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//===----------------------------------------------------------------------===//

class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> Rt;

  let Inst{27-20} = 0b11101111;
  let Inst{19-16} = opc19_16;
  let Inst{15-12} = Rt;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{6-5}   = 0b00;
  let Inst{4}     = 1;
  let Inst{3-0}   = 0b0000;
}

// APSR is the application level alias of CPSR. FMSTAT copies the FPSCR
// N, Z, C, V flags to APSR.
let Defs = [CPSR], Uses = [FPSCR], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
                        "vmrs", "\tapsr_nzcv, fpscr", [(arm_fmstat)]>;

// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
                      "vmrs", "\t$Rt, fpscr",
                      [(set GPR:$Rt, (int_arm_get_fpscr))]>;

// System level FPEXC, FPSID -> GPR
let Uses = [FPSCR] in {
def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
                            "vmrs", "\t$Rt, fpexc", []>;
def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
                            "vmrs", "\t$Rt, fpsid", []>;
} // Uses = [FPSCR]

//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//===----------------------------------------------------------------------===//

class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> src;

  // Encode instruction operand.
  let Inst{15-12} = src;

  let Inst{27-20} = 0b11101110;
  let Inst{19-16} = opc19_16;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{4}     = 1;
}

let Defs = [FPSCR] in {
// Application level GPR -> FPSCR
def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
                    "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
// System level GPR -> FPEXC
def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
                          "vmsr", "\tfpexc, $src", []>;
// System level GPR -> FPSID
def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
                          "vmsr", "\tfpsid, $src", []>;
} // Defs = [FPSCR]

//===----------------------------------------------------------------------===//
// Misc.
//===----------------------------------------------------------------------===//

// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
  // Instruction operands.
  bits<5>  Dd;
  bits<32> imm;

  // Encode instruction operands.
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
  let Inst{19}    = imm{31};
  let Inst{18-16} = imm{22-20};
  let Inst{3-0}   = imm{19-16};

  // Encode remaining instruction bits.
  let Inst{27-23} = 0b11101;
  let Inst{21-20} = 0b11;
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 1;          // Double precision.
  let Inst{7-4}   = 0b0000;
}

def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
                    VFPMiscFrm, IIC_fpUNA32,
                    "vmov", ".f32\t$Sd, $imm",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  // Instruction operands.
  bits<5>  Sd;
  bits<32> imm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{19}    = imm{31};    // The immediate is handled as a double.
  let Inst{18-16} = imm{22-20};
  let Inst{3-0}   = imm{19-16};

  // Encode remaining instruction bits.
  let Inst{27-23} = 0b11101;
  let Inst{21-20} = 0b11;
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 0;          // Single precision.
  let Inst{7-4}   = 0b0000;
}
} // End of 'let isReMaterializable = 1 in'