1 //====- X86InstrSSE.td - Describe the X86 Instruction Set -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
// This file was developed by Evan Cheng and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
// Move Aligned Packed Single / Double FP values: register-to-register,
// load (rm) and store (mr) forms.  movaps is SSE1; movapd is SSE2 and is
// distinguished by the OpSize (0x66) prefix.
def MOVAPSrr : I<0x28, MRMSrcReg, (ops V4F32:$dst, V4F32:$src),
                 "movaps {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE1]>, TB;
def MOVAPDrr : I<0x28, MRMSrcReg, (ops V2F64:$dst, V2F64:$src),
                 "movapd {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE2]>, TB, OpSize;

def MOVAPSrm : I<0x28, MRMSrcMem, (ops V4F32:$dst, f128mem:$src),
                 "movaps {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE1]>, TB;
def MOVAPSmr : I<0x29, MRMDestMem, (ops f128mem:$dst, V4F32:$src),
                 "movaps {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE1]>, TB;
// FIXED: movapd is an SSE2 instruction; the load form was incorrectly gated
// on HasSSE1 while MOVAPDrr / MOVAPDmr already require HasSSE2.
def MOVAPDrm : I<0x28, MRMSrcMem, (ops V2F64:$dst, f128mem:$src),
                 "movapd {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def MOVAPDmr : I<0x29, MRMDestMem, (ops f128mem:$dst, V2F64:$src),
                 "movapd {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
// Packed FP bitwise logical operations.  These are two-address: $src1 is
// tied to $dst.  NOTE(review): the closing braces for these `let` groups are
// not in this span; they appear later in the file.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
// AND / XOR register forms have isel patterns via the X86fand / X86fxor
// nodes; the OR forms currently carry no pattern.
def ANDPSrr : I<0x54, MRMSrcReg, (ops V4F32:$dst, V4F32:$src1, V4F32:$src2),
                "andps {$src2, $dst|$dst, $src2}",
                [(set V4F32:$dst, (X86fand V4F32:$src1, V4F32:$src2))]>,
              Requires<[HasSSE1]>, TB;
def ANDPDrr : I<0x54, MRMSrcReg, (ops V2F64:$dst, V2F64:$src1, V2F64:$src2),
                "andpd {$src2, $dst|$dst, $src2}",
                [(set V2F64:$dst, (X86fand V2F64:$src1, V2F64:$src2))]>,
              Requires<[HasSSE2]>, TB, OpSize;
def ORPSrr : I<0x56, MRMSrcReg, (ops V4F32:$dst, V4F32:$src1, V4F32:$src2),
              "orps {$src2, $dst|$dst, $src2}", []>,
             Requires<[HasSSE1]>, TB;
def ORPDrr : I<0x56, MRMSrcReg, (ops V2F64:$dst, V2F64:$src1, V2F64:$src2),
              "orpd {$src2, $dst|$dst, $src2}", []>,
             Requires<[HasSSE2]>, TB, OpSize;
def XORPSrr : I<0x57, MRMSrcReg, (ops V4F32:$dst, V4F32:$src1, V4F32:$src2),
                "xorps {$src2, $dst|$dst, $src2}",
                [(set V4F32:$dst, (X86fxor V4F32:$src1, V4F32:$src2))]>,
              Requires<[HasSSE1]>, TB;
def XORPDrr : I<0x57, MRMSrcReg, (ops V2F64:$dst, V2F64:$src1, V2F64:$src2),
                "xorpd {$src2, $dst|$dst, $src2}",
                [(set V2F64:$dst, (X86fxor V2F64:$src1, V2F64:$src2))]>,
              Requires<[HasSSE2]>, TB, OpSize;
// Memory (folded-load) forms; the second operand comes from 128-bit memory
// via the X86loadpv4f32 / X86loadpv2f64 load nodes.
def ANDPSrm : I<0x54, MRMSrcMem, (ops V4F32:$dst, V4F32:$src1, f128mem:$src2),
                "andps {$src2, $dst|$dst, $src2}",
                [(set V4F32:$dst, (X86fand V4F32:$src1,
                                   (X86loadpv4f32 addr:$src2)))]>,
              Requires<[HasSSE1]>, TB;
def ANDPDrm : I<0x54, MRMSrcMem, (ops V2F64:$dst, V2F64:$src1, f128mem:$src2),
                "andpd {$src2, $dst|$dst, $src2}",
                [(set V2F64:$dst, (X86fand V2F64:$src1,
                                   (X86loadpv2f64 addr:$src2)))]>,
              Requires<[HasSSE2]>, TB, OpSize;
def ORPSrm : I<0x56, MRMSrcMem, (ops V4F32:$dst, V4F32:$src1, f128mem:$src2),
              "orps {$src2, $dst|$dst, $src2}", []>,
             Requires<[HasSSE1]>, TB;
def ORPDrm : I<0x56, MRMSrcMem, (ops V2F64:$dst, V2F64:$src1, f128mem:$src2),
              "orpd {$src2, $dst|$dst, $src2}", []>,
             Requires<[HasSSE2]>, TB, OpSize;
def XORPSrm : I<0x57, MRMSrcMem, (ops V4F32:$dst, V4F32:$src1, f128mem:$src2),
                "xorps {$src2, $dst|$dst, $src2}",
                [(set V4F32:$dst, (X86fxor V4F32:$src1,
                                   (X86loadpv4f32 addr:$src2)))]>,
              Requires<[HasSSE1]>, TB;
def XORPDrm : I<0x57, MRMSrcMem, (ops V2F64:$dst, V2F64:$src1, f128mem:$src2),
                "xorpd {$src2, $dst|$dst, $src2}",
                [(set V2F64:$dst, (X86fxor V2F64:$src1,
                                   (X86loadpv2f64 addr:$src2)))]>,
              Requires<[HasSSE2]>, TB, OpSize;
// ANDN (and-not) forms; not commutable, and currently pattern-less.
def ANDNPSrr : I<0x55, MRMSrcReg, (ops V4F32:$dst, V4F32:$src1, V4F32:$src2),
                 "andnps {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE1]>, TB;
def ANDNPSrm : I<0x55, MRMSrcMem, (ops V4F32:$dst, V4F32:$src1, f128mem:$src2),
                 "andnps {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE1]>, TB;
def ANDNPDrr : I<0x55, MRMSrcReg, (ops V2F64:$dst, V2F64:$src1, V2F64:$src2),
                 "andnpd {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def ANDNPDrm : I<0x55, MRMSrcMem, (ops V2F64:$dst, V2F64:$src1, f128mem:$src2),
                 "andnpd {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
103 //===----------------------------------------------------------------------===//
104 // XMM Floating point support (requires SSE / SSE2)
105 //===----------------------------------------------------------------------===//
// Scalar FP moves: movss (SSE1, XS = F3 prefix) and movsd (SSE2, XD = F2
// prefix).  The memory forms carry the scalar load / store patterns.
def MOVSSrr : I<0x10, MRMSrcReg, (ops FR32:$dst, FR32:$src),
                "movss {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE1]>, XS;
def MOVSDrr : I<0x10, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                "movsd {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE2]>, XD;
def MOVSSrm : I<0x10, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
                "movss {$src, $dst|$dst, $src}",
                [(set FR32:$dst, (loadf32 addr:$src))]>,
              Requires<[HasSSE1]>, XS;
def MOVSSmr : I<0x11, MRMDestMem, (ops f32mem:$dst, FR32:$src),
                "movss {$src, $dst|$dst, $src}",
                [(store FR32:$src, addr:$dst)]>,
              Requires<[HasSSE1]>, XS;
def MOVSDrm : I<0x10, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
                "movsd {$src, $dst|$dst, $src}",
                [(set FR64:$dst, (loadf64 addr:$src))]>,
              Requires<[HasSSE2]>, XD;
def MOVSDmr : I<0x11, MRMDestMem, (ops f64mem:$dst, FR64:$src),
                "movsd {$src, $dst|$dst, $src}",
                [(store FR64:$src, addr:$dst)]>,
              Requires<[HasSSE2]>, XD;
// Truncating FP -> integer conversions (matched by fp_to_sint) and
// scalar precision conversions (fextend / fround).
def CVTTSS2SIrr: I<0x2C, MRMSrcReg, (ops R32:$dst, FR32:$src),
                   "cvttss2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint FR32:$src))]>,
                 Requires<[HasSSE1]>, XS;
def CVTTSS2SIrm: I<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
                   "cvttss2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint (loadf32 addr:$src)))]>,
                 Requires<[HasSSE1]>, XS;
def CVTTSD2SIrr: I<0x2C, MRMSrcReg, (ops R32:$dst, FR64:$src),
                   "cvttsd2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint FR64:$src))]>,
                 Requires<[HasSSE2]>, XD;
def CVTTSD2SIrm: I<0x2C, MRMSrcMem, (ops R32:$dst, f64mem:$src),
                   "cvttsd2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint (loadf64 addr:$src)))]>,
                 Requires<[HasSSE2]>, XD;
// Both precision conversions involve a double operand and so require SSE2.
def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops FR64:$dst, FR32:$src),
                  "cvtss2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (fextend FR32:$src))]>,
                Requires<[HasSSE2]>, XS;
def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
                  "cvtss2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (fextend (loadf32 addr:$src)))]>,
                Requires<[HasSSE2]>, XS;
def CVTSD2SSrr: I<0x5A, MRMSrcReg, (ops FR32:$dst, FR64:$src),
                  "cvtsd2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (fround FR64:$src))]>,
                Requires<[HasSSE2]>, XD;
def CVTSD2SSrm: I<0x5A, MRMSrcMem, (ops FR32:$dst, f64mem:$src),
                  "cvtsd2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (fround (loadf64 addr:$src)))]>,
                Requires<[HasSSE2]>, XD;
// Signed integer -> scalar FP conversions (matched by sint_to_fp).
// FIXED: cvtsi2ss is an SSE1 instruction (F3 0F 2A); both of its forms were
// incorrectly gated on HasSSE2.  cvtsi2sd produces a double and correctly
// requires SSE2.
def CVTSI2SSrr: I<0x2A, MRMSrcReg, (ops FR32:$dst, R32:$src),
                  "cvtsi2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (sint_to_fp R32:$src))]>,
                Requires<[HasSSE1]>, XS;
def CVTSI2SSrm: I<0x2A, MRMSrcMem, (ops FR32:$dst, i32mem:$src),
                  "cvtsi2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>,
                Requires<[HasSSE1]>, XS;
def CVTSI2SDrr: I<0x2A, MRMSrcReg, (ops FR64:$dst, R32:$src),
                  "cvtsi2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (sint_to_fp R32:$src))]>,
                Requires<[HasSSE2]>, XD;
def CVTSI2SDrm: I<0x2A, MRMSrcMem, (ops FR64:$dst, i32mem:$src),
                  "cvtsi2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>,
                Requires<[HasSSE2]>, XD;
// Scalar square root (matched by fsqrt), register and folded-load forms.
def SQRTSSrr : I<0x51, MRMSrcReg, (ops FR32:$dst, FR32:$src),
                 "sqrtss {$src, $dst|$dst, $src}",
                 [(set FR32:$dst, (fsqrt FR32:$src))]>,
               Requires<[HasSSE1]>, XS;
def SQRTSSrm : I<0x51, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
                 "sqrtss {$src, $dst|$dst, $src}",
                 [(set FR32:$dst, (fsqrt (loadf32 addr:$src)))]>,
               Requires<[HasSSE1]>, XS;
def SQRTSDrr : I<0x51, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                 "sqrtsd {$src, $dst|$dst, $src}",
                 [(set FR64:$dst, (fsqrt FR64:$src))]>,
               Requires<[HasSSE2]>, XD;
def SQRTSDrm : I<0x51, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
                 "sqrtsd {$src, $dst|$dst, $src}",
                 [(set FR64:$dst, (fsqrt (loadf64 addr:$src)))]>,
               Requires<[HasSSE2]>, XD;
// Unordered scalar FP compares (matched by X86cmp).  These produce no
// register result ($src1 is an input, not a def) -- they only set EFLAGS.
def UCOMISSrr: I<0x2E, MRMSrcReg, (ops FR32:$src1, FR32:$src2),
                 "ucomiss {$src2, $src1|$src1, $src2}",
                 [(X86cmp FR32:$src1, FR32:$src2)]>,
               Requires<[HasSSE1]>, TB;
def UCOMISSrm: I<0x2E, MRMSrcMem, (ops FR32:$src1, f32mem:$src2),
                 "ucomiss {$src2, $src1|$src1, $src2}",
                 [(X86cmp FR32:$src1, (loadf32 addr:$src2))]>,
               Requires<[HasSSE1]>, TB;
def UCOMISDrr: I<0x2E, MRMSrcReg, (ops FR64:$src1, FR64:$src2),
                 "ucomisd {$src2, $src1|$src1, $src2}",
                 [(X86cmp FR64:$src1, FR64:$src2)]>,
               Requires<[HasSSE2]>, TB, OpSize;
def UCOMISDrm: I<0x2E, MRMSrcMem, (ops FR64:$src1, f64mem:$src2),
                 "ucomisd {$src2, $src1|$src1, $src2}",
                 [(X86cmp FR64:$src1, (loadf64 addr:$src2))]>,
               Requires<[HasSSE2]>, TB, OpSize;
// NOTE(review): the closing braces for these `let` groups are not in this
// span; they appear later in the file.
let isTwoAddress = 1 in {
// SSE Scalar Arithmetic.  Two-address: $src1 is tied to $dst.  Add and
// multiply are commutable; divide and subtract (below) are not.
let isCommutable = 1 in {
def ADDSSrr : I<0x58, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "addss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fadd FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def ADDSDrr : I<0x58, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "addsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fadd FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def MULSSrr : I<0x59, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "mulss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fmul FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def MULSDrr : I<0x59, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "mulsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fmul FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
// Folded-load forms of add / multiply.
def ADDSSrm : I<0x58, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "addss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fadd FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def ADDSDrm : I<0x58, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "addsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fadd FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
def MULSSrm : I<0x59, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "mulss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fmul FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def MULSDrm : I<0x59, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "mulsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fmul FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
// Non-commutable scalar arithmetic: divide and subtract.
def DIVSSrr : I<0x5E, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "divss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fdiv FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def DIVSSrm : I<0x5E, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "divss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fdiv FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def DIVSDrr : I<0x5E, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "divsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fdiv FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def DIVSDrm : I<0x5E, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "divsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fdiv FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
def SUBSSrr : I<0x5C, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "subss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fsub FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def SUBSSrm : I<0x5C, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "subss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fsub FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def SUBSDrr : I<0x5C, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "subsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fsub FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def SUBSDrm : I<0x5C, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "subsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fsub FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
// Scalar FP compares with an immediate condition code: the $cc operand
// (SSECC) selects the predicate encoded in the instruction's imm8.
def CMPSSrr : I<0xC2, MRMSrcReg,
                (ops FR32:$dst, FR32:$src1, FR32:$src, SSECC:$cc),
                "cmp${cc}ss {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE1]>, XS;
def CMPSSrm : I<0xC2, MRMSrcMem,
                (ops FR32:$dst, FR32:$src1, f32mem:$src, SSECC:$cc),
                "cmp${cc}ss {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE1]>, XS;
// FIXED: cmpsd operates on double-precision values and requires SSE2; the
// register form was incorrectly gated on HasSSE1 while the memory form
// below already used HasSSE2.
def CMPSDrr : I<0xC2, MRMSrcReg,
                (ops FR64:$dst, FR64:$src1, FR64:$src, SSECC:$cc),
                "cmp${cc}sd {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE2]>, XD;
def CMPSDrm : I<0xC2, MRMSrcMem,
                (ops FR64:$dst, FR64:$src1, f64mem:$src, SSECC:$cc),
                "cmp${cc}sd {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE2]>, XD;
306 //===----------------------------------------------------------------------===//
307 // Alias Instructions
308 //===----------------------------------------------------------------------===//
310 // Alias instructions that map fld0 to pxor for sse.
311 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// Alias instructions mapping "load FP 0.0" to an xor of a register with
// itself.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXED: pxor on an XMM register (66 0F EF) is an SSE2 instruction, so the
// FR32 form was not encodable on SSE1-only targets; use xorps (0F 57)
// instead.  The FR64 form uses xorpd to stay in the double-precision domain.
def FsFLD0SS : I<0x57, MRMInitReg, (ops FR32:$dst),
                 "xorps $dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB;
def FsFLD0SD : I<0x57, MRMInitReg, (ops FR64:$dst),
                 "xorpd $dst, $dst", [(set FR64:$dst, fp64imm0)]>,
               Requires<[HasSSE2]>, TB, OpSize;
// Alias instructions to do FR32 / FR64 reg-to-reg copy using movaps / movapd.
// Upper bits are disregarded.
// FIXED: these were declared on the packed V4F32 / V2F64 register classes,
// which merely duplicated MOVAPSrr / MOVAPDrr and contradicted both the
// comment above and the FR32 / FR64 load aliases (FsMOVAPSrm / FsMOVAPDrm)
// below.
def FsMOVAPSrr : I<0x28, MRMSrcReg, (ops FR32:$dst, FR32:$src),
                   "movaps {$src, $dst|$dst, $src}", []>,
                 Requires<[HasSSE1]>, TB;
def FsMOVAPDrr : I<0x28, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                   "movapd {$src, $dst|$dst, $src}", []>,
                 Requires<[HasSSE2]>, TB, OpSize;
// Alias instructions to load FR32 / FR64 from f128mem using movaps / movapd.
// Upper bits are disregarded.  Matched via the X86loadpf32 / X86loadpf64
// load nodes.
def FsMOVAPSrm : I<0x28, MRMSrcMem, (ops FR32:$dst, f128mem:$src),
                   "movaps {$src, $dst|$dst, $src}",
                   [(set FR32:$dst, (X86loadpf32 addr:$src))]>,
                 Requires<[HasSSE1]>, TB;
def FsMOVAPDrm : I<0x28, MRMSrcMem, (ops FR64:$dst, f128mem:$src),
                   "movapd {$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (X86loadpf64 addr:$src))]>,
                 Requires<[HasSSE2]>, TB, OpSize;
// Alias bitwise logical operations using SSE logical ops on packed FP values.
// Two-address: $src1 is tied to $dst.  NOTE(review): the closing braces for
// these `let` groups are not in this span; they appear later in the file.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
// AND / XOR forms carry X86fand / X86fxor patterns; the OR forms have none.
def FsANDPSrr : I<0x54, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                  "andps {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>,
                Requires<[HasSSE1]>, TB;
def FsANDPDrr : I<0x54, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                  "andpd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>,
                Requires<[HasSSE2]>, TB, OpSize;
def FsORPSrr : I<0x56, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                 "orps {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE1]>, TB;
def FsORPDrr : I<0x56, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                 "orpd {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def FsXORPSrr : I<0x57, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                  "xorps {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>,
                Requires<[HasSSE1]>, TB;
def FsXORPDrr : I<0x57, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                  "xorpd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>,
                Requires<[HasSSE2]>, TB, OpSize;
// Folded-load forms; the memory operand is loaded via X86loadpf32 /
// X86loadpf64.
def FsANDPSrm : I<0x54, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
                  "andps {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86fand FR32:$src1,
                                    (X86loadpf32 addr:$src2)))]>,
                Requires<[HasSSE1]>, TB;
def FsANDPDrm : I<0x54, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
                  "andpd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86fand FR64:$src1,
                                    (X86loadpf64 addr:$src2)))]>,
                Requires<[HasSSE2]>, TB, OpSize;
def FsORPSrm : I<0x56, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
                 "orps {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE1]>, TB;
def FsORPDrm : I<0x56, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
                 "orpd {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def FsXORPSrm : I<0x57, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
                  "xorps {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86fxor FR32:$src1,
                                    (X86loadpf32 addr:$src2)))]>,
                Requires<[HasSSE1]>, TB;
def FsXORPDrm : I<0x57, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
                  "xorpd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86fxor FR64:$src1,
                                    (X86loadpf64 addr:$src2)))]>,
                Requires<[HasSSE2]>, TB, OpSize;
// ANDN (and-not) forms; not commutable, currently pattern-less.
def FsANDNPSrr : I<0x55, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                   "andnps {$src2, $dst|$dst, $src2}", []>,
                 Requires<[HasSSE1]>, TB;
def FsANDNPSrm : I<0x55, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
                   "andnps {$src2, $dst|$dst, $src2}", []>,
                 Requires<[HasSSE1]>, TB;
def FsANDNPDrr : I<0x55, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                   "andnpd {$src2, $dst|$dst, $src2}", []>,
                 Requires<[HasSSE2]>, TB, OpSize;
def FsANDNPDrm : I<0x55, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
                   "andnpd {$src2, $dst|$dst, $src2}", []>,
                 Requires<[HasSSE2]>, TB, OpSize;