//===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_FTOI    : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDT_ITOF    : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisVT<2, i32>]>;

def arm_ftoui  : SDNode<"ARMISD::FTOUI",   SDT_FTOI>;
def arm_ftosi  : SDNode<"ARMISD::FTOSI",   SDT_FTOI>;
def arm_sitof  : SDNode<"ARMISD::SITOF",   SDT_ITOF>;
def arm_uitof  : SDNode<"ARMISD::UITOF",   SDT_ITOF>;
def arm_fmstat : SDNode<"ARMISD::FMSTAT",  SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP",   SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
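
// These selection DAG nodes mirror the ARMISD::* nodes produced during ISel
// lowering; the patterns in this file select them into VFP instructions.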

//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32Imm(InVal);
      return CurDAG->getTargetConstant(enc, MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
}

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP64Imm(InVal);
      return CurDAG->getTargetConstant(enc, MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
}
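
// Note: the VFP3 "vmov immediate" form encodes a constant in only 8 bits
// (1 sign bit, 3 exponent bits, 4 fraction bits). getFP32Imm/getFP64Imm
// return -1 for values that cannot be encoded, so the predicates above
// reject such constants.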

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//===----------------------------------------------------------------------===//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
                 [(set SPR:$Sd, (load addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
                 [(store (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
                 [(store SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}
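
// Note: addrmode5 is the VFP load/store addressing mode: a base register plus
// an immediate offset that is a multiple of 4, encoded as an 8-bit word count.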

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//===----------------------------------------------------------------------===//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}

let neverHasSideEffects = 1 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpLoad_m, IIC_fpLoad_mu>;

} // neverHasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;

def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
        Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
        Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",  (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
        Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",  (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
        Requires<[HasVFP2]>;
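
// As the aliases above show, vpush/vpop are assembler shorthands for
// VSTMDB/VLDMIA with SP as the base register and writeback enabled.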

// FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//===----------------------------------------------------------------------===//

def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;

def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;

def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;

def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;

def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;

def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
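
// Rationale: -(a * b) and (-a) * b can round differently under directed
// rounding modes, so the fneg is only folded into vnmul when sign-dependent
// rounding does not have to be honored.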

// These are encoded as unary instructions.
let Defs = [FPSCR] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                  [/* For disassembly only; pattern left blank */]>;

def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//===----------------------------------------------------------------------===//

def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let Defs = [FPSCR] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR]

def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                   [(set DPR:$Dd, (fextend SPR:$Sm))]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                    [(set SPR:$Sd, (fround DPR:$Dm))]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;
}

// Between half-precision and single-precision. For disassembly only.

// FIXME: Verify encoding after integrated assembler is working.
def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def : ARMPat<(f32_to_f16 SPR:$a),
             (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def : ARMPat<(f16_to_f32 GPR:$a),
             (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;
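
// Note: vcvtb converts the bottom half-word of the S register, while vcvtt
// converts the top half-word.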

def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;

let neverHasSideEffects = 1 in {
def VMOVD  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//===----------------------------------------------------------------------===//

def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let neverHasSideEffects = 1 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
                       (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                       [/* FIXME: Can't write pattern for multiple result instr*/]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VMOVRRS : AVConv3I<0b11000101, 0b1010,
                       (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  // Instruction operands.
  bits<5> src1;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = src1{3-0};
  let Inst{5}     = src1{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
  let DecoderMethod = "DecodeVMOVRRS";
}
} // neverHasSideEffects

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let neverHasSideEffects = 1 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  // Instruction operands.
  bits<5> dst1;
  bits<4> src1;
  bits<4> src2;

  // Encode instruction operands.
  let Inst{3-0}   = dst1{3-0};
  let Inst{5}     = dst1{4};
  let Inst{15-12} = src1;
  let Inst{19-16} = src2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  let DecoderMethod = "DecodeVMOVSRR";
}

// FMRX: SPR system reg -> GPR
// FMXR: GPR -> VFP system reg

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
  let Inst{7} = 1; // s32
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
  let Inst{7} = 0; // u32
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
// FIXME: Verify encoding after integrated assembler is working.
def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]> {
  let Inst{7} = 0; // Z bit
}

def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}
} // Uses = [FPSCR]

// Convert between floating-point and fixed-point.
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL
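//
// For example, VTOSHS below converts f32 to S16 fixed-point with $fbits
// fraction bits, and VSHTOS performs the inverse conversion.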

// FIXME: Marking these as codegen only seems wrong. They are real
//        instructions(?).
let Constraints = "$a = $dst", isCodeGenOnly = 1 in {

// FP to Fixed-Point:

def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

// Fixed-Point to FP:

def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

} // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'

//===----------------------------------------------------------------------===//
// FP Multiply-Accumulate Operations.
//===----------------------------------------------------------------------===//

def VMLAD : ADbI<0b11100, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,UseFPVMLx]>;

def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,UseFPVMLx]>;

def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
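
// Note: the VMLA/VMLS family above is guarded by UseFPVMLx, which is expected
// to be set only on subtargets where these multiply-accumulate forms are not
// slower than a separate vmul followed by vadd/vsub.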

//===----------------------------------------------------------------------===//
// FP Conditional moves.
//===----------------------------------------------------------------------===//

let neverHasSideEffects = 1 in {
def VMOVDcc : ARMPseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, pred:$p),
                    4, IIC_fpUNA64,
                    [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
                 RegConstraint<"$Dn = $Dd">;

def VMOVScc : ARMPseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, pred:$p),
                    4, IIC_fpUNA32,
                    [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
                 RegConstraint<"$Sn = $Sd">;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//===----------------------------------------------------------------------===//

class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> Rt;

  let Inst{27-20} = 0b11101111;
  let Inst{19-16} = opc19_16;
  let Inst{15-12} = Rt;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{6-5}   = 0b00;
  let Inst{4}     = 1;
  let Inst{3-0}   = 0b0000;
}

// APSR is the application level alias of CPSR. The FPSCR N, Z, C, V flags are
// transferred to it by this instruction.
let Defs = [CPSR], Uses = [FPSCR], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
                        "vmrs", "\tapsr_nzcv, fpscr", [(arm_fmstat)]>;
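
// This is what makes the result of a preceding vcmp/vcmpe usable by a
// following predicated (conditional) instruction.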

// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
                      "vmrs", "\t$Rt, fpscr",
                      [(set GPR:$Rt, (int_arm_get_fpscr))]>;

// System level FPEXC, FPSID -> GPR
let Uses = [FPSCR] in {
def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
                            "vmrs", "\t$Rt, fpexc", []>;
def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
                            "vmrs", "\t$Rt, fpsid", []>;
}

//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//===----------------------------------------------------------------------===//

class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> src;

  // Encode instruction operand.
  let Inst{15-12} = src;

  let Inst{27-20} = 0b11101110;
  let Inst{19-16} = opc19_16;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{4}     = 1;
}

let Defs = [FPSCR] in {
// Application level GPR -> FPSCR
def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
                    "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
// System level GPR -> FPEXC
def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
                          "vmsr", "\tfpexc, $src", []>;
// System level GPR -> FPSID
def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
                          "vmsr", "\tfpsid, $src", []>;
}

//===----------------------------------------------------------------------===//
// Misc.
//===----------------------------------------------------------------------===//

// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
  // Instruction operands.
  bits<5> Dd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Dd{4};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Dd{3-0};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 1;          // Double precision.
  let Inst{7-4}   = 0b0000;
  let Inst{3-0}   = imm{3-0};
}

def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
                    VFPMiscFrm, IIC_fpUNA32,
                    "vmov", ".f32\t$Sd, $imm",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  // Instruction operands.
  bits<5> Sd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Sd{0};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Sd{4-1};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 0;          // Single precision.
  let Inst{7-4}   = 0b0000;
  let Inst{3-0}   = imm{3-0};
}
} // End of 'let isReMaterializable = 1 in'
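
// Note: FCONSTD/FCONSTS allow VFP3 targets to materialize the encodable FP
// immediates directly in a register, avoiding a constant-pool load for those
// values.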