//===- PPCInstrAltivec.td - The PowerPC Altivec Extension --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//
// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;

def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;
// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;
// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;
// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
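
// These fragments let instruction selection fold a constant-splat build_vector
// directly into a single vspltisb/vspltish/vspltisw: the PatLeaf checks that
// every element is the same small signed immediate, and the xform extracts
// that value for the instruction's SIMM field.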
class isVDOT {   // vector dot instruction.
  list<Register> Defs = [CR6];
  bit RC = 1;
}
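
// isVDOT is mixed into the dot-form ("vcmp*.") comparison variants below:
// they set the record bit and leave a summary of the compare in CR6.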
//===----------------------------------------------------------------------===//
// Instruction Definitions.

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;
let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src),
                   "lvxl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}
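
// lvsl/lvsr compute the permute control vector used with vperm to realign
// unaligned vector data; no selection patterns are attached to them here.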
def LVSL : XForm_1<31,  6, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsl $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
def LVSR : XForm_1<31, 38, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsr $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, memrr:$dst),
                    "stvebx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst),
                    "stvehx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst),
                    "stvewx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst),
                    "stvxl $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
}
let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;

def VPERM : VAForm_1a<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                      "vperm $vD, $vA, $vB, $vC", VecPerm,
                      [(set VRRC:$vD,
                            (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                    imm:$SH))]>;
def VSEL : VAForm_1a<42, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                     "vsel $vD, $vA, $vB, $vC", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vsel VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;

def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vand $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;
def VCFSX : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfsx $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfux $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vexptefp VRRC:$vB))]>;
def VLOGEFP : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                       "vlogefp $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vlogefp VRRC:$vB))]>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      []>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      []>;
def VREFP : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
                     "vrefp $vD, $vB", VecFP,
                     [(set VRRC:$vD, (int_ppc_altivec_vrefp VRRC:$vB))]>;
def VRFIM : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
                     "vrfim $vD, $vB", VecFP,
                     [(set VRRC:$vD, (int_ppc_altivec_vrfim VRRC:$vB))]>;
def VRFIN : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
                     "vrfin $vD, $vB", VecFP,
                     [(set VRRC:$vD, (int_ppc_altivec_vrfin VRRC:$vB))]>;
def VRFIP : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
                     "vrfip $vD, $vB", VecFP,
                     [(set VRRC:$vD, (int_ppc_altivec_vrfip VRRC:$vB))]>;
def VRFIZ : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
                     "vrfiz $vD, $vB", VecFP,
                     [(set VRRC:$vD, (int_ppc_altivec_vrfiz VRRC:$vB))]>;
def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
                         "vrsqrtefp $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
def VSUBCUW : VXForm_1<1408, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubcuw VRRC:$vA, VRRC:$vB))]>;
def VSUBFP : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vsubfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VXForm_1<1792, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubsbs VRRC:$vA, VRRC:$vB))]>;
def VSUBSHS : VXForm_1<1856, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubshs VRRC:$vA, VRRC:$vB))]>;
def VSUBSWS : VXForm_1<1920, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubsws VRRC:$vA, VRRC:$vB))]>;

def VSUBUBS : VXForm_1<1536, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsububs VRRC:$vA, VRRC:$vB))]>;
def VSUBUHS : VXForm_1<1600, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubuhs VRRC:$vA, VRRC:$vB))]>;
def VSUBUWS : VXForm_1<1664, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubuws VRRC:$vA, VRRC:$vB))]>;
def VSUMSWS : VXForm_1<1928, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsumsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsumsws VRRC:$vA, VRRC:$vB))]>;
def VSUM2SWS: VXForm_1<1672, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsum2sws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsum2sws VRRC:$vA, VRRC:$vB))]>;
def VSUM4SBS: VXForm_1<1800, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsum4sbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsum4sbs VRRC:$vA, VRRC:$vB))]>;
def VSUM4SHS: VXForm_1<1608, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsum4shs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsum4shs VRRC:$vA, VRRC:$vB))]>;
def VSUM4UBS: VXForm_1<1544, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsum4ubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsum4ubs VRRC:$vA, VRRC:$vB))]>;
def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                   "vor $vD, $vA, $vB", VecFP,
                   [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VRLB : VXForm_1<4, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vrlb VRRC:$vA, VRRC:$vB))]>;
def VRLH : VXForm_1<68, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vrlh VRRC:$vA, VRRC:$vB))]>;
def VRLW : VXForm_1<132, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vrlw VRRC:$vA, VRRC:$vB))]>;

def VSLO : VXForm_1<1036, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslo $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vslo VRRC:$vA, VRRC:$vB))]>;
def VSLB : VXForm_1<260, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vslb VRRC:$vA, VRRC:$vB))]>;
def VSLH : VXForm_1<324, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vslh VRRC:$vA, VRRC:$vB))]>;
def VSLW : VXForm_1<388, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vslw VRRC:$vA, VRRC:$vB))]>;
def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                                      VSPLT_shuffle_mask:$UIMM))]>;
def VSR : VXForm_1<708, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                   "vsr $vD, $vA, $vB", VecFP,
                   [(set VRRC:$vD,
                         (int_ppc_altivec_vsr VRRC:$vA, VRRC:$vB))]>;
def VSRO : VXForm_1<1100, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vsro $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vsro VRRC:$vA, VRRC:$vB))]>;
def VSRAB : VXForm_1<772, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrab $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vsrab VRRC:$vA, VRRC:$vB))]>;
def VSRAH : VXForm_1<836, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrah $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vsrah VRRC:$vA, VRRC:$vB))]>;
def VSRAW : VXForm_1<900, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsraw $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vsraw VRRC:$vA, VRRC:$vB))]>;
def VSRB : VXForm_1<516, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vsrb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vsrb VRRC:$vA, VRRC:$vB))]>;
def VSRH : VXForm_1<580, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vsrh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vsrh VRRC:$vA, VRRC:$vB))]>;
def VSRW : VXForm_1<644, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vsrw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                          (int_ppc_altivec_vsrw VRRC:$vA, VRRC:$vB))]>;
def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
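
// The vsplti* defs produce v4f32 results here; selection patterns for the
// integer vector types are listed under "Additional Altivec Patterns" below.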
// Altivec Comparisons.

// f32 element comparisons.
def VCMPBFP : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vcmpbfp $vD, $vA, $vB", VecFPCompare,
                        [(set VRRC:$vD,
                              (int_ppc_altivec_vcmpbfp VRRC:$vA, VRRC:$vB))]>;
def VCMPBFPo : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpbfp. $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD, (v4f32
                               (PPCvcmp_o VRRC:$vA, VRRC:$vB, 966)))]>, isVDOT;
def VCMPEQFP : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpeqfp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpeqfp VRRC:$vA, VRRC:$vB))]>;
def VCMPEQFPo : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 198)))]>, isVDOT;
def VCMPGEFP : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgefp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgefp VRRC:$vA, VRRC:$vB))]>;
def VCMPGEFPo : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 454)))]>, isVDOT;
def VCMPGTFP : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtfp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtfp VRRC:$vA, VRRC:$vB))]>;
def VCMPGTFPo : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 710)))]>, isVDOT;
// i8 element comparisons.
def VCMPEQUB : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequb $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpequb VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUBo : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 6)))]>, isVDOT;
def VCMPGTSB : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsb $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtsb VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSBo : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 774)))]>, isVDOT;
def VCMPGTUB : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtub $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtub VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUBo : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 518)))]>, isVDOT;
// i16 element comparisons.
def VCMPEQUH : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpequh VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUHo : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 70)))]>, isVDOT;
def VCMPGTSH : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtsh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSHo : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 838)))]>, isVDOT;
def VCMPGTUH : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtuh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtuh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUHo : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 582)))]>, isVDOT;
// i32 element comparisons.
def VCMPEQUW : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpequw VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUWo : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 134)))]>, isVDOT;
def VCMPGTSW : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtsw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSWo : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 902)))]>, isVDOT;
def VCMPGTUW : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtuw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtuw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUWo : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 646)))]>, isVDOT;
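
// V_SET0 materializes an all-zero vector with the idiomatic vxor vD,vD,vD;
// the zero patterns below reuse it for the integer vector types.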
def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}
//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;
// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>;
// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>;
// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
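
// All of these vector types live in the same VR register class, so the
// bitconverts select to nothing and simply reinterpret the register.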
// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;
// Logical Operations
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or  VRRC:$A, VRRC:$B)), (v16i8 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or  VRRC:$A, VRRC:$B)), (v8i16 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))), (v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))), (v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
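
// There is no plain AltiVec fp multiply, so fmul is selected as a
// multiply-add with an all-zero addend.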
def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
// Fused multiply add and multiply sub for packed float.  These are represented
// separately from the real instructions above, for operations that must have
// the additional precision, such as Newton-Raphson (used by divide, sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;
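
// VPERM itself is type-agnostic; this pattern covers v4i32 permutes in
// addition to the v4f32 form matched by the instruction definition above.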
def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;