//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst" in {
// fma3p_rm - Packed FMA3 register/register and register/memory forms for both
// 128-bit (XMM) and 256-bit (YMM) operands. $src1 is tied to $dst, so FMA3
// instructions take only three explicit operands. The commutability of the
// register and memory variants is controlled independently because the memory
// variant folds into operand 3 and thus restricts which swaps are legal.
multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
                    PatFrag MemFrag128, PatFrag MemFrag256,
                    ValueType OpVT128, ValueType OpVT256,
                    bit IsRVariantCommutable = 0, bit IsMVariantCommutable = 0,
                    SDPatternOperator Op = null_frag> {
  let usesCustomInserter = 1, isCommutable = IsRVariantCommutable in
  def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set VR128:$dst, (OpVT128 (Op VR128:$src2,
                                           VR128:$src1, VR128:$src3)))]>;

  let mayLoad = 1, isCommutable = IsMVariantCommutable in
  def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, f128mem:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set VR128:$dst, (OpVT128 (Op VR128:$src2, VR128:$src1,
                                           (MemFrag128 addr:$src3))))]>;

  let usesCustomInserter = 1, isCommutable = IsRVariantCommutable in
  def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
                (ins VR256:$src1, VR256:$src2, VR256:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set VR256:$dst, (OpVT256 (Op VR256:$src2, VR256:$src1,
                                            VR256:$src3)))]>, VEX_L;

  let mayLoad = 1, isCommutable = IsMVariantCommutable in
  def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
                (ins VR256:$src1, VR256:$src2, f256mem:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set VR256:$dst,
                   (OpVT256 (Op VR256:$src2, VR256:$src1,
                             (MemFrag256 addr:$src3))))]>, VEX_L;
}
} // Constraints = "$src1 = $dst"
// fma3p_forms - Instantiates the 132, 213 and 231 packed FMA3 forms for one
// mnemonic/type combination (e.g. vfmaddps).
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy,
                       PatFrag MemFrag128, PatFrag MemFrag256,
                       SDNode Op, ValueType OpTy128, ValueType OpTy256> {
  // For 213, both the register and memory variant are commutable.
  // Indeed, the commutable operands are 1 and 2 and both live in registers
  // for both variants.
  defm r213 : fma3p_rm<opc213,
                       !strconcat(OpcodeStr, "213", PackTy),
                       MemFrag128, MemFrag256, OpTy128, OpTy256,
                       /* IsRVariantCommutable */ 1,
                       /* IsMVariantCommutable */ 1,
                       Op>;
let hasSideEffects = 0 in {
  // The 132 form carries no ISel pattern; it is reached only by commuting.
  defm r132 : fma3p_rm<opc132,
                       !strconcat(OpcodeStr, "132", PackTy),
                       MemFrag128, MemFrag256, OpTy128, OpTy256>;
  // For 231, only the register variant is commutable.
  // For the memory variant the folded operand must be in 3. Thus,
  // in that case, it cannot be swapped with 2.
  defm r231 : fma3p_rm<opc231,
                       !strconcat(OpcodeStr, "231", PackTy),
                       MemFrag128, MemFrag256, OpTy128, OpTy256,
                       /* IsRVariantCommutable */ 1,
                       /* IsMVariantCommutable */ 0>;
} // hasSideEffects = 0
}
// Fused Multiply-Add, packed single precision.
let ExeDomain = SSEPackedSingle in {
  defm VFMADDPS    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", loadv4f32,
                                 loadv8f32, X86Fmadd, v4f32, v8f32>;
  defm VFMSUBPS    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", loadv4f32,
                                 loadv8f32, X86Fmsub, v4f32, v8f32>;
  defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps",
                                 loadv4f32, loadv8f32, X86Fmaddsub,
                                 v4f32, v8f32>;
  defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps",
                                 loadv4f32, loadv8f32, X86Fmsubadd,
                                 v4f32, v8f32>;
}
// Fused Multiply-Add, packed double precision. VEX_W selects the 64-bit
// element forms of the same opcodes.
let ExeDomain = SSEPackedDouble in {
  defm VFMADDPD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", loadv2f64,
                                 loadv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
  defm VFMSUBPD    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", loadv2f64,
                                 loadv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
  defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd",
                                 loadv2f64, loadv4f64, X86Fmaddsub,
                                 v2f64, v4f64>, VEX_W;
  defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd",
                                 loadv2f64, loadv4f64, X86Fmsubadd,
                                 v2f64, v4f64>, VEX_W;
}
// Fused Negative Multiply-Add, packed single precision.
let ExeDomain = SSEPackedSingle in {
  defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", loadv4f32,
                               loadv8f32, X86Fnmadd, v4f32, v8f32>;
  defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", loadv4f32,
                               loadv8f32, X86Fnmsub, v4f32, v8f32>;
}
// Fused Negative Multiply-Add, packed double precision.
let ExeDomain = SSEPackedDouble in {
  defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", loadv2f64,
                               loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
  defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd",
                               loadv2f64, loadv4f64, X86Fnmsub, v2f64,
                               v4f64>, VEX_W;
}
// All source register operands of FMA instructions can be commuted.
// In many cases such commute transformation requires an opcode adjustment,
// for example, commuting the operands 1 and 2 in FMA*132 form would require
// an opcode change to FMA*231:
//   FMA*132* reg1, reg2, reg3; // reg1 * reg3 + reg2;
//     -->
//   FMA*231* reg2, reg1, reg3; // reg1 * reg3 + reg2;
// Currently, the commute transformation is supported for only a few FMA forms.
// That is the reason why \p IsRVariantCommutable and \p IsMVariantCommutable
// parameters are used here.
// The general commute operands optimization working for all forms is going
// to be implemented soon. (Please see http://reviews.llvm.org/D13269 for
// details.)
let Constraints = "$src1 = $dst", hasSideEffects = 0 in {
// fma3s_rm - Scalar FMA3 register/register and register/memory forms.
// As with the packed forms, $src1 is tied to $dst and the commutability of
// the register and memory variants is controlled independently.
multiclass fma3s_rm<bits<8> opc, string OpcodeStr,
                    X86MemOperand x86memop, RegisterClass RC,
                    bit IsRVariantCommutable = 0, bit IsMVariantCommutable = 0,
                    SDPatternOperator OpNode = null_frag> {
  let usesCustomInserter = 1, isCommutable = IsRVariantCommutable in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (OpNode RC:$src2, RC:$src1, RC:$src3))]>;

  let mayLoad = 1, isCommutable = IsMVariantCommutable in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                  (OpNode RC:$src2, RC:$src1, (load addr:$src3)))]>;
}
} // Constraints = "$src1 = $dst", hasSideEffects = 0
// These FMA*_Int instructions are defined specially for being used when
// the scalar FMA intrinsics are lowered to machine instructions, and in that
// sense they are similar to the existing ADD*_Int, SUB*_Int, MUL*_Int, etc.
169 // The FMA*_Int instructions are _TEMPORARILY_ defined as NOT commutable.
170 // The upper bits of the result of scalar FMA intrinsics must be copied from
171 // the upper bits of the 1st operand. So, commuting the 1st operand would
172 // invalidate the upper bits of the intrinsic result.
173 // The corresponding optimization which allows commuting 2nd and 3rd operands
174 // of FMA*_Int instructions has been developed and is waiting for
175 // code-review approval and checkin (Please see http://reviews.llvm.org/D13269).
let Constraints = "$src1 = $dst", isCommutable = 0, isCodeGenOnly = 1,
    hasSideEffects = 0 in {
// fma3s_rm_int - Scalar FMA3 *_Int forms used when the scalar FMA intrinsics
// are lowered to machine instructions. They carry no ISel patterns (selection
// is done explicitly so the upper result bits come from operand 1).
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,
                        Operand memopr, RegisterClass RC> {
  def r_Int : FMA3<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   []>;

  let mayLoad = 1 in
  def m_Int : FMA3<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, memopr:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   []>;
}
} // Constraints = "$src1 = $dst", isCommutable = 0, isCodeGenOnly = 1,
  // hasSideEffects = 0
// fma3s_forms - Instantiates the 132, 213 and 231 scalar FMA3 forms for one
// mnemonic/type combination (e.g. vfmaddss).
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpStr, string PackTy,
                       SDNode OpNode, RegisterClass RC,
                       X86MemOperand x86memop> {
  defm r132 : fma3s_rm<opc132, !strconcat(OpStr, "132", PackTy), x86memop, RC>;
  defm r213 : fma3s_rm<opc213, !strconcat(OpStr, "213", PackTy), x86memop, RC,
                       /* IsRVariantCommutable */ 1,
                       /* IsMVariantCommutable */ 1,
                       OpNode>;
  defm r231 : fma3s_rm<opc231, !strconcat(OpStr, "231", PackTy), x86memop, RC,
                       /* IsRVariantCommutable */ 1,
                       /* IsMVariantCommutable */ 0,
                       OpNode>;
}
// The FMA 213 form is created for the lowering of scalar FMA intrinsics
// to machine instructions.
// The FMA 132 form can trivially be obtained by commuting the 2nd and 3rd
// operands of the 213 form.
// The FMA 231 form can be obtained only by commuting the 1st operand of the
// 213 or 132 forms and is possible only after special analysis of all uses of
// the initial instruction. Such analysis does not exist yet and thus
// introducing the 231 form of FMA*_Int instructions is done using an
// optimistic assumption that such analysis will be implemented eventually.
// fma3s_int_forms - Instantiates the 132, 213 and 231 *_Int scalar forms.
multiclass fma3s_int_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                           string OpStr, string PackTy,
                           RegisterClass RC, Operand memop> {
  defm r132 : fma3s_rm_int<opc132, !strconcat(OpStr, "132", PackTy),
                           memop, RC>;
  defm r213 : fma3s_rm_int<opc213, !strconcat(OpStr, "213", PackTy),
                           memop, RC>;
  defm r231 : fma3s_rm_int<opc231, !strconcat(OpStr, "231", PackTy),
                           memop, RC>;
}
// fma3s - Instantiates both the plain and the *_Int scalar FMA3 families for
// one mnemonic (SS and SD variants), plus the patterns mapping the scalar FMA
// intrinsics onto the 213 *_Int forms.
multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                 string OpStr, Intrinsic IntF32, Intrinsic IntF64,
                 SDNode OpNode> {
  defm SS : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", OpNode,
                        FR32, f32mem>,
            fma3s_int_forms<opc132, opc213, opc231, OpStr, "ss", VR128, ssmem>;
  defm SD : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", OpNode,
                        FR64, f64mem>,
            fma3s_int_forms<opc132, opc213, opc231, OpStr, "sd", VR128, sdmem>,
            VEX_W;

  // These patterns use the 123 ordering, instead of 213, even though
  // they match the intrinsic to the 213 version of the instruction.
  // This is because src1 is tied to dest, and the scalar intrinsics
  // require the pass-through values to come from the first source
  // operand, not the second.
  def : Pat<(IntF32 VR128:$src1, VR128:$src2, VR128:$src3),
            (COPY_TO_REGCLASS
              (!cast<Instruction>(NAME#"SSr213r_Int")
                (COPY_TO_REGCLASS $src1, FR32),
                (COPY_TO_REGCLASS $src2, FR32),
                (COPY_TO_REGCLASS $src3, FR32)),
              VR128)>;

  def : Pat<(IntF64 VR128:$src1, VR128:$src2, VR128:$src3),
            (COPY_TO_REGCLASS
              (!cast<Instruction>(NAME#"SDr213r_Int")
                (COPY_TO_REGCLASS $src1, FR64),
                (COPY_TO_REGCLASS $src2, FR64),
                (COPY_TO_REGCLASS $src3, FR64)),
              VR128)>;
}
// Scalar FMA3 instruction definitions (each expands to SS and SD variants).
defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss,
                    int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG;
defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss,
                    int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG;

defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss,
                     int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG;
defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss,
                     int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG;
//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//
// fma4s - Scalar FMA4 instructions: 4-operand, non-destructive forms where
// either operand 3 (rm, MemOp4) or operand 2 (mr) can be folded from memory.
multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                 X86MemOperand x86memop, ValueType OpVT, SDNode OpNode,
                 PatFrag mem_frag> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcReg, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG, MemOp4;
  def rm : FMA4<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, x86memop:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
                           (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG, MemOp4;
  def mr : FMA4<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, x86memop:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG;

  // For disassembly only: the reversed-operand register form.
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : FMA4<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
               VEX_LIG;
}
// fma4s_int - Scalar FMA4 *_Int forms, selected from the FMA intrinsics.
multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
                     ComplexPattern mem_cpat, Intrinsic Int> {
let isCodeGenOnly = 1 in {
  let isCommutable = 1 in
  def rr_Int : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst,
                 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, VEX_LIG, MemOp4;
  def rm_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, memop:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst, (Int VR128:$src1, VR128:$src2,
                                  mem_cpat:$src3))]>, VEX_W, VEX_LIG, MemOp4;
  def mr_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, memop:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst,
                 (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>, VEX_LIG;
} // isCodeGenOnly = 1
}
// fma4p - Packed FMA4 instructions for 128-bit (XMM) and 256-bit (YMM)
// operands, with memory folding of operand 3 (rm/rmY, MemOp4) or operand 2
// (mr/mrY), plus reversed-operand register forms for the disassembler.
multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                 ValueType OpVT128, ValueType OpVT256,
                 PatFrag ld_frag128, PatFrag ld_frag256> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
           VEX_W, MemOp4;
  def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, f128mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
                              (ld_frag128 addr:$src3)))]>, VEX_W, MemOp4;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, f128mem:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
  let isCommutable = 1 in
  def rrY : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, VR256:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR256:$dst,
             (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
           VEX_W, MemOp4, VEX_L;
  def rmY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, f256mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
                              (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4, VEX_L;
  def mrY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f256mem:$src2, VR256:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR256:$dst, (OpNode VR256:$src1,
             (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L;

  // For disassembly only: reversed-operand register forms.
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
  def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
  def rrY_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
               (ins VR256:$src1, VR256:$src2, VR256:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
               VEX_L;
  } // isCodeGenOnly = 1
}
// Scalar FMA4 instruction definitions.
defm VFMADDSS4  : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, X86Fmadd, loadf32>,
                  fma4s_int<0x6A, "vfmaddss", ssmem, sse_load_f32,
                            int_x86_fma_vfmadd_ss>;
defm VFMADDSD4  : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, X86Fmadd, loadf64>,
                  fma4s_int<0x6B, "vfmaddsd", sdmem, sse_load_f64,
                            int_x86_fma_vfmadd_sd>;
defm VFMSUBSS4  : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32>,
                  fma4s_int<0x6E, "vfmsubss", ssmem, sse_load_f32,
                            int_x86_fma_vfmsub_ss>;
defm VFMSUBSD4  : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64>,
                  fma4s_int<0x6F, "vfmsubsd", sdmem, sse_load_f64,
                            int_x86_fma_vfmsub_sd>;
defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
                        X86Fnmadd, loadf32>,
                  fma4s_int<0x7A, "vfnmaddss", ssmem, sse_load_f32,
                            int_x86_fma_vfnmadd_ss>;
defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
                        X86Fnmadd, loadf64>,
                  fma4s_int<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
                            int_x86_fma_vfnmadd_sd>;
defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
                        X86Fnmsub, loadf32>,
                  fma4s_int<0x7E, "vfnmsubss", ssmem, sse_load_f32,
                            int_x86_fma_vfnmsub_ss>;
defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
                        X86Fnmsub, loadf64>,
                  fma4s_int<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
                            int_x86_fma_vfnmsub_sd>;
// Packed FMA4 instruction definitions, single precision.
let ExeDomain = SSEPackedSingle in {
  defm VFMADDPS4    : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMSUBPS4    : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFNMADDPS4   : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFNMSUBPS4   : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
}
// Packed FMA4 instruction definitions, double precision.
let ExeDomain = SSEPackedDouble in {
  defm VFMADDPD4    : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMSUBPD4    : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFNMADDPD4   : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFNMSUBPD4   : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
}