//===- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//
// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;
def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;

// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;

class isVDOT {   // vector dot instruction.
  list<Register> Defs = [CR6];
  bit RC = 1;
}
//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.

// VX1_Int - A VXForm_1 intrinsic definition.
class VX1_Int<bits<11> xo, string asmstr, Intrinsic IntID>
  : VXForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB))]>;

// VX2_Int - A VXForm_2 intrinsic definition.
class VX2_Int<bits<11> xo, string asmstr, Intrinsic IntID>
  : VXForm_2<xo, (ops VRRC:$vD, VRRC:$vB), asmstr, VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vB))]>;

//===----------------------------------------------------------------------===//
// Instruction Definitions.

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;
let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src),
                   "lvxl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, memrr:$src),
                   "lvsl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                   PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, memrr:$src),
                   "lvsr $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                   PPC970_Unit_LSU;
let isStore = 1, noResults = 1, PPC970_Unit = 2 in {   // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, memrr:$dst),
                    "stvebx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst),
                    "stvehx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst),
                    "stvewx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst),
                    "stvxl $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
}
let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;
def VMHADDSHS  : VAForm_1a<32, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                           "vmhaddshs $vD, $vA, $vB, $vC", VecFP,
                           [(set VRRC:$vD,
                      (int_ppc_altivec_vmhaddshs VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
def VMHRADDSHS : VAForm_1a<33, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                           "vmhraddshs $vD, $vA, $vB, $vC", VecFP,
                           [(set VRRC:$vD,
                     (int_ppc_altivec_vmhraddshs VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
def VPERM      : VAForm_1a<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                           "vperm $vD, $vA, $vB, $vC", VecPerm,
                           [(set VRRC:$vD,
                             (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                imm:$SH))]>;
def VSEL   : VAForm_1a<42, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                       "vsel $vD, $vA, $vB, $vC", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsel VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;

def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
def VAND  : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vand $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX  : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfsx $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX  : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfux $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vexptefp VRRC:$vB))]>;
def VLOGEFP  : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                        "vlogefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vlogefp VRRC:$vB))]>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      []>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      []>;

def VMRGHH : VX1_Int< 76, "vmrghh $vD, $vA, $vB", int_ppc_altivec_vmrghh>;
def VMRGHW : VX1_Int<140, "vmrghw $vD, $vA, $vB", int_ppc_altivec_vmrghw>;
def VMRGLH : VX1_Int<332, "vmrglh $vD, $vA, $vB", int_ppc_altivec_vmrglh>;
def VMRGLW : VX1_Int<396, "vmrglw $vD, $vA, $vB", int_ppc_altivec_vmrglw>;

def VMULESB : VX1_Int<776, "vmulesb $vD, $vA, $vB", int_ppc_altivec_vmulesb>;
def VMULESH : VX1_Int<840, "vmulesh $vD, $vA, $vB", int_ppc_altivec_vmulesh>;
def VMULEUB : VX1_Int<520, "vmuleub $vD, $vA, $vB", int_ppc_altivec_vmuleub>;
def VMULEUH : VX1_Int<584, "vmuleuh $vD, $vA, $vB", int_ppc_altivec_vmuleuh>;
def VMULOSB : VX1_Int<264, "vmulosb $vD, $vA, $vB", int_ppc_altivec_vmulosb>;

def VREFP     : VX2_Int<266, "vrefp $vD, $vB",     int_ppc_altivec_vrefp>;
def VRFIM     : VX2_Int<714, "vrfim $vD, $vB",     int_ppc_altivec_vrfim>;
def VRFIN     : VX2_Int<522, "vrfin $vD, $vB",     int_ppc_altivec_vrfin>;
def VRFIP     : VX2_Int<650, "vrfip $vD, $vB",     int_ppc_altivec_vrfip>;
def VRFIZ     : VX2_Int<586, "vrfiz $vD, $vB",     int_ppc_altivec_vrfiz>;
def VRSQRTEFP : VX2_Int<330, "vrsqrtefp $vD, $vB", int_ppc_altivec_vrsqrtefp>;

def VSUBCUW : VX1_Int<1408, "vsubcuw $vD, $vA, $vB", int_ppc_altivec_vsubcuw>;

def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubfp $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VX1_Int<1792, "vsubsbs $vD, $vA, $vB", int_ppc_altivec_vsubsbs>;
def VSUBSHS : VX1_Int<1856, "vsubshs $vD, $vA, $vB", int_ppc_altivec_vsubshs>;
def VSUBSWS : VX1_Int<1920, "vsubsws $vD, $vA, $vB", int_ppc_altivec_vsubsws>;
def VSUBUBS : VX1_Int<1536, "vsububs $vD, $vA, $vB", int_ppc_altivec_vsububs>;
def VSUBUHS : VX1_Int<1600, "vsubuhs $vD, $vA, $vB", int_ppc_altivec_vsubuhs>;
def VSUBUWS : VX1_Int<1664, "vsubuws $vD, $vA, $vB", int_ppc_altivec_vsubuws>;
def VSUMSWS : VX1_Int<1928, "vsumsws $vD, $vA, $vB", int_ppc_altivec_vsumsws>;
def VSUM2SWS: VX1_Int<1672, "vsum2sws $vD, $vA, $vB", int_ppc_altivec_vsum2sws>;
def VSUM4SBS: VX1_Int<1800, "vsum4sbs $vD, $vA, $vB", int_ppc_altivec_vsum4sbs>;
def VSUM4SHS: VX1_Int<1608, "vsum4shs $vD, $vA, $vB", int_ppc_altivec_vsum4shs>;
def VSUM4UBS: VX1_Int<1544, "vsum4ubs $vD, $vA, $vB", int_ppc_altivec_vsum4ubs>;

def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR  : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VRLB : VXForm_1<4, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vrlb VRRC:$vA, VRRC:$vB))]>;
def VRLH : VXForm_1<68, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vrlh VRRC:$vA, VRRC:$vB))]>;
def VRLW : VXForm_1<132, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vrlw VRRC:$vA, VRRC:$vB))]>;

def VSLO : VXForm_1<1036, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslo $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslo VRRC:$vA, VRRC:$vB))]>;
def VSLB : VXForm_1<260, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslb VRRC:$vA, VRRC:$vB))]>;
def VSLH : VXForm_1<324, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslh VRRC:$vA, VRRC:$vB))]>;
def VSLW : VXForm_1<388, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslw VRRC:$vA, VRRC:$vB))]>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                       VSPLT_shuffle_mask:$UIMM))]>;

def VSR   : VXForm_1<708, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsr $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsr VRRC:$vA, VRRC:$vB))]>;
def VSRO  : VXForm_1<1100, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsro $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsro VRRC:$vA, VRRC:$vB))]>;
def VSRAB : VXForm_1<772, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrab $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrab VRRC:$vA, VRRC:$vB))]>;
def VSRAH : VXForm_1<836, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrah $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrah VRRC:$vA, VRRC:$vB))]>;
def VSRAW : VXForm_1<900, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsraw $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsraw VRRC:$vA, VRRC:$vB))]>;
def VSRB  : VXForm_1<516, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrb $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrb VRRC:$vA, VRRC:$vB))]>;
def VSRH  : VXForm_1<580, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrh $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrh VRRC:$vA, VRRC:$vB))]>;
def VSRW  : VXForm_1<644, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrw $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrw VRRC:$vA, VRRC:$vB))]>;

def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;

def VPKPX   : VXForm_1<782, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkpx $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkpx VRRC:$vA, VRRC:$vB))]>;
def VPKSHSS : VXForm_1<398, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkshss $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkshss VRRC:$vA, VRRC:$vB))]>;
def VPKSHUS : VXForm_1<270, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkshus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkshus VRRC:$vA, VRRC:$vB))]>;
def VPKSWSS : VXForm_1<462, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkswss $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkswss VRRC:$vA, VRRC:$vB))]>;
def VPKSWUS : VXForm_1<334, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkswus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkswus VRRC:$vA, VRRC:$vB))]>;
def VPKUHUM : VXForm_1<14, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuhum $vD, $vA, $vB", VecFP,
                       []>;
def VPKUHUS : VXForm_1<142, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuhus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkuhus VRRC:$vA, VRRC:$vB))]>;
def VPKUWUM : VXForm_1<78, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuwum $vD, $vA, $vB", VecFP,
                       []>;
def VPKUWUS : VXForm_1<206, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuwus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkuwus VRRC:$vA, VRRC:$vB))]>;

def VUPKHPX : VXForm_2<846, (ops VRRC:$vD, VRRC:$vB),
                       "vupkhpx $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupkhpx VRRC:$vB))]>;
def VUPKHSB : VXForm_2<526, (ops VRRC:$vD, VRRC:$vB),
                       "vupkhsb $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupkhsb VRRC:$vB))]>;
def VUPKHSH : VXForm_2<590, (ops VRRC:$vD, VRRC:$vB),
                       "vupkhsh $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupkhsh VRRC:$vB))]>;
def VUPKLPX : VXForm_2<974, (ops VRRC:$vD, VRRC:$vB),
                       "vupklpx $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupklpx VRRC:$vB))]>;
def VUPKLSB : VXForm_2<654, (ops VRRC:$vD, VRRC:$vB),
                       "vupklsb $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupklsb VRRC:$vB))]>;
def VUPKLSH : VXForm_2<718, (ops VRRC:$vD, VRRC:$vB),
                       "vupklsh $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupklsh VRRC:$vB))]>;


// Altivec Comparisons.
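// The trailing-"o" defs below are the record forms of each compare (asm
// mnemonic with a trailing '.'); isVDOT marks them as also defining CR6.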

// f32 element comparisons.
def VCMPBFP   : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpbfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpbfp VRRC:$vA, VRRC:$vB))]>;
def VCMPBFPo  : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpbfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 966)))]>, isVDOT;
def VCMPEQFP  : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpeqfp VRRC:$vA, VRRC:$vB))]>;
def VCMPEQFPo : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 198)))]>, isVDOT;
def VCMPGEFP  : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgefp VRRC:$vA, VRRC:$vB))]>;
def VCMPGEFPo : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 454)))]>, isVDOT;
def VCMPGTFP  : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtfp VRRC:$vA, VRRC:$vB))]>;
def VCMPGTFPo : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 710)))]>, isVDOT;

// i8 element comparisons.
def VCMPEQUB  : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpequb VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUBo : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 6)))]>, isVDOT;
def VCMPGTSB  : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtsb VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSBo : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 774)))]>, isVDOT;
def VCMPGTUB  : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtub VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUBo : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 518)))]>, isVDOT;

// i16 element comparisons.
def VCMPEQUH  : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpequh VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUHo : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 70)))]>, isVDOT;
def VCMPGTSH  : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtsh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSHo : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 838)))]>, isVDOT;
def VCMPGTUH  : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtuh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUHo : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 582)))]>, isVDOT;

// i32 element comparisons.
def VCMPEQUW  : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpequw VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUWo : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 134)))]>, isVDOT;
def VCMPGTSW  : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtsw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSWo : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 902)))]>, isVDOT;
def VCMPGTUW  : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtuw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUWo : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                            (PPCvcmp_o VRRC:$vA, VRRC:$vB, 646)))]>, isVDOT;

def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}   // End of PPC970_Unit = 5 (VALU Operations).

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//
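
// Undef and zero vectors.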
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;
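
// Loads.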
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>;
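
// Stores.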
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>;
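
// Bit conversions: reinterpreting one vector type as another is free, since
// all of these types live in the same vector register class.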
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;

// Logical Operations
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or  VRRC:$A, VRRC:$B)), (v16i8 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or  VRRC:$A, VRRC:$B)), (v8i16 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))), (v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))), (v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
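
// AltiVec has no non-fused vector FP multiply, so a plain fmul is selected as
// vmaddfp with an all-zero addend (V_SET0).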
def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;

// Fused multiply-add and multiply-subtract for packed float.  These are
// represented separately from the real instructions above for operations that
// require the additional precision, such as Newton-Raphson refinement
// (used by divide and sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vperm VRRC:$A, VRRC:$B, VRRC:$C),
          (VPERM VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;

def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;