//===- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//===----------------------------------------------------------------------===//

// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLTB_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 1));
}]>;
def VSPLTB_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N, 1);
}], VSPLTB_get_imm>;
def VSPLTH_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 2));
}]>;
def VSPLTH_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N, 2);
}], VSPLTH_get_imm>;
def VSPLTW_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 4));
}]>;
def VSPLTW_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N, 4);
}], VSPLTW_get_imm>;
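
// Each *_shuffle_mask PatLeaf matches a shuffle mask that splats a single
// element of the given byte width; its paired SDNodeXForm then rewrites the
// matched mask into the 5-bit element index that VSPLT* encodes as $UIMM.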

// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
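
// These PatLeafs match any build_vector whose elements all equal one value
// representable as a 5-bit sign-extended immediate of the given element
// width, which is exactly what the vspltis[bhw] instructions can produce.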

//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.
//===----------------------------------------------------------------------===//

// VA1a_Int - A VAForm_1a intrinsic definition.
class VA1a_Int<bits<6> xo, string opc, Intrinsic IntID>
  : VAForm_1a<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
              [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;

// VX1_Int - A VXForm_1 intrinsic definition.
class VX1_Int<bits<11> xo, string opc, Intrinsic IntID>
  : VXForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB))]>;

// VX2_Int - A VXForm_2 intrinsic definition.
class VX2_Int<bits<11> xo, string opc, Intrinsic IntID>
  : VXForm_2<xo, (ops VRRC:$vD, VRRC:$vB),
             !strconcat(opc, " $vD, $vB"), VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vB))]>;
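
// With these helpers, an instruction whose semantics are exactly one
// intrinsic can be defined in a single line, as VPERM is below:
//   def VPERM : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>;
// This expands to the full VAForm_1a record, asm string, and selection
// pattern that would otherwise be written out by hand.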

//===----------------------------------------------------------------------===//
// Instruction Definitions.
//===----------------------------------------------------------------------===//

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;

def MFVSCR : VXForm_4<1540, (ops VRRC:$vD),
                      "mfvscr $vD", LdStGeneral,
                      [(set VRRC:$vD, (int_ppc_altivec_mfvscr))]>;
def MTVSCR : VXForm_5<1604, (ops VRRC:$vB),
                      "mtvscr $vB", LdStGeneral,
                      [(int_ppc_altivec_mtvscr VRRC:$vB)]>;

let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src),
                   "lvxl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, memrr:$src),
                   "lvsl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                   PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, memrr:$src),
                   "lvsr $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                   PPC970_Unit_LSU;
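
// lvsl/lvsr compute the permute control vector for an unaligned address;
// feeding that vector to vperm realigns data loaded or stored across a
// 16-byte boundary.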

let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, memrr:$dst),
                    "stvebx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst),
                    "stvehx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst),
                    "stvewx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst),
                    "stvxl $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
}

let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;
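
// The FPContractions predicate gates folding a separate fmul+fadd into these
// fused forms, since contraction changes rounding (one rounding step instead
// of two).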

def VMHADDSHS  : VA1a_Int<32, "vmhaddshs", int_ppc_altivec_vmhaddshs>;
def VMHRADDSHS : VA1a_Int<33, "vmhraddshs", int_ppc_altivec_vmhraddshs>;
def VMLADDUHM  : VA1a_Int<34, "vmladduhm", int_ppc_altivec_vmladduhm>;
def VPERM      : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>;
def VSEL       : VA1a_Int<42, "vsel", int_ppc_altivec_vsel>;

def VSLDOI  : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                       "vsldoi $vD, $vA, $vB, $SH", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                     imm:$SH))]>;

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDCUW : VX1_Int<384, "vaddcuw", int_ppc_altivec_vaddcuw>;
def VADDSBS : VX1_Int<768, "vaddsbs", int_ppc_altivec_vaddsbs>;
def VADDSHS : VX1_Int<832, "vaddshs", int_ppc_altivec_vaddshs>;
def VADDSWS : VX1_Int<896, "vaddsws", int_ppc_altivec_vaddsws>;
def VADDUBS : VX1_Int<512, "vaddubs", int_ppc_altivec_vaddubs>;
def VADDUHS : VX1_Int<576, "vadduhs", int_ppc_altivec_vadduhs>;
def VADDUWS : VX1_Int<640, "vadduws", int_ppc_altivec_vadduws>;

def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vand $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX  : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfsx $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX  : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfux $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vctsxs VRRC:$vB, imm:$UIMM))]>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>;
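
// These four treat the integer vector as fixed-point: vcfsx/vcfux scale the
// converted result by 2^-UIMM, while vctsxs/vctuxs scale by 2^UIMM and
// saturate.  Note that $UIMM precedes $vB in the operand list but prints
// after it in the asm string.
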
def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>;
def VLOGEFP  : VX2_Int<458, "vlogefp", int_ppc_altivec_vlogefp>;

def VAVGSB : VX1_Int<1282, "vavgsb", int_ppc_altivec_vavgsb>;
def VAVGSH : VX1_Int<1346, "vavgsh", int_ppc_altivec_vavgsh>;
def VAVGSW : VX1_Int<1410, "vavgsw", int_ppc_altivec_vavgsw>;
def VAVGUB : VX1_Int<1026, "vavgub", int_ppc_altivec_vavgub>;
def VAVGUH : VX1_Int<1090, "vavguh", int_ppc_altivec_vavguh>;
def VAVGUW : VX1_Int<1154, "vavguw", int_ppc_altivec_vavguw>;

def VMAXFP : VX1_Int<1034, "vmaxfp", int_ppc_altivec_vmaxfp>;
def VMAXSB : VX1_Int< 258, "vmaxsb", int_ppc_altivec_vmaxsb>;
def VMAXSH : VX1_Int< 322, "vmaxsh", int_ppc_altivec_vmaxsh>;
def VMAXSW : VX1_Int< 386, "vmaxsw", int_ppc_altivec_vmaxsw>;
def VMAXUB : VX1_Int<   2, "vmaxub", int_ppc_altivec_vmaxub>;
def VMAXUH : VX1_Int<  66, "vmaxuh", int_ppc_altivec_vmaxuh>;
def VMAXUW : VX1_Int< 130, "vmaxuw", int_ppc_altivec_vmaxuw>;
def VMINFP : VX1_Int<1098, "vminfp", int_ppc_altivec_vminfp>;
def VMINSB : VX1_Int< 770, "vminsb", int_ppc_altivec_vminsb>;
def VMINSH : VX1_Int< 834, "vminsh", int_ppc_altivec_vminsh>;
def VMINSW : VX1_Int< 898, "vminsw", int_ppc_altivec_vminsw>;
def VMINUB : VX1_Int< 514, "vminub", int_ppc_altivec_vminub>;
def VMINUH : VX1_Int< 578, "vminuh", int_ppc_altivec_vminuh>;
def VMINUW : VX1_Int< 642, "vminuw", int_ppc_altivec_vminuw>;

def VMRGHB : VX1_Int< 12, "vmrghb", int_ppc_altivec_vmrghb>;
def VMRGHH : VX1_Int< 76, "vmrghh", int_ppc_altivec_vmrghh>;
def VMRGHW : VX1_Int<140, "vmrghw", int_ppc_altivec_vmrghw>;
def VMRGLB : VX1_Int<268, "vmrglb", int_ppc_altivec_vmrglb>;
def VMRGLH : VX1_Int<332, "vmrglh", int_ppc_altivec_vmrglh>;
def VMRGLW : VX1_Int<396, "vmrglw", int_ppc_altivec_vmrglw>;

def VMSUMMBM : VA1a_Int<37, "vmsummbm", int_ppc_altivec_vmsummbm>;
def VMSUMSHM : VA1a_Int<40, "vmsumshm", int_ppc_altivec_vmsumshm>;
def VMSUMSHS : VA1a_Int<41, "vmsumshs", int_ppc_altivec_vmsumshs>;
def VMSUMUBM : VA1a_Int<36, "vmsumubm", int_ppc_altivec_vmsumubm>;
def VMSUMUHM : VA1a_Int<38, "vmsumuhm", int_ppc_altivec_vmsumuhm>;
def VMSUMUHS : VA1a_Int<39, "vmsumuhs", int_ppc_altivec_vmsumuhs>;

def VMULESB : VX1_Int<776, "vmulesb", int_ppc_altivec_vmulesb>;
def VMULESH : VX1_Int<840, "vmulesh", int_ppc_altivec_vmulesh>;
def VMULEUB : VX1_Int<520, "vmuleub", int_ppc_altivec_vmuleub>;
def VMULEUH : VX1_Int<584, "vmuleuh", int_ppc_altivec_vmuleuh>;
def VMULOSB : VX1_Int<264, "vmulosb", int_ppc_altivec_vmulosb>;
def VMULOSH : VX1_Int<328, "vmulosh", int_ppc_altivec_vmulosh>;
def VMULOUB : VX1_Int<  8, "vmuloub", int_ppc_altivec_vmuloub>;
def VMULOUH : VX1_Int< 72, "vmulouh", int_ppc_altivec_vmulouh>;

def VREFP     : VX2_Int<266, "vrefp", int_ppc_altivec_vrefp>;
def VRFIM     : VX2_Int<714, "vrfim", int_ppc_altivec_vrfim>;
def VRFIN     : VX2_Int<522, "vrfin", int_ppc_altivec_vrfin>;
def VRFIP     : VX2_Int<650, "vrfip", int_ppc_altivec_vrfip>;
def VRFIZ     : VX2_Int<586, "vrfiz", int_ppc_altivec_vrfiz>;
def VRSQRTEFP : VX2_Int<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;

def VSUBCUW : VX1_Int<1408, "vsubcuw", int_ppc_altivec_vsubcuw>;

def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubfp $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VX1_Int<1792, "vsubsbs" , int_ppc_altivec_vsubsbs>;
def VSUBSHS : VX1_Int<1856, "vsubshs" , int_ppc_altivec_vsubshs>;
def VSUBSWS : VX1_Int<1920, "vsubsws" , int_ppc_altivec_vsubsws>;
def VSUBUBS : VX1_Int<1536, "vsububs" , int_ppc_altivec_vsububs>;
def VSUBUHS : VX1_Int<1600, "vsubuhs" , int_ppc_altivec_vsubuhs>;
def VSUBUWS : VX1_Int<1664, "vsubuws" , int_ppc_altivec_vsubuws>;
def VSUMSWS : VX1_Int<1928, "vsumsws" , int_ppc_altivec_vsumsws>;
def VSUM2SWS: VX1_Int<1672, "vsum2sws", int_ppc_altivec_vsum2sws>;
def VSUM4SBS: VX1_Int<1800, "vsum4sbs", int_ppc_altivec_vsum4sbs>;
def VSUM4SHS: VX1_Int<1608, "vsum4shs", int_ppc_altivec_vsum4shs>;
def VSUM4UBS: VX1_Int<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs>;

def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR  : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VRLB : VX1_Int<  4, "vrlb", int_ppc_altivec_vrlb>;
def VRLH : VX1_Int< 68, "vrlh", int_ppc_altivec_vrlh>;
def VRLW : VX1_Int<132, "vrlw", int_ppc_altivec_vrlw>;

def VSL  : VX1_Int< 452, "vsl" , int_ppc_altivec_vsl>;
def VSLO : VX1_Int<1036, "vslo", int_ppc_altivec_vslo>;
def VSLB : VX1_Int< 260, "vslb", int_ppc_altivec_vslb>;
def VSLH : VX1_Int< 324, "vslh", int_ppc_altivec_vslh>;
def VSLW : VX1_Int< 388, "vslw", int_ppc_altivec_vslw>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
                                      VSPLTB_shuffle_mask:$UIMM))]>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v8i16 VRRC:$vB), (undef),
                                      VSPLTH_shuffle_mask:$UIMM))]>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4i32 VRRC:$vB), (undef),
                                      VSPLTW_shuffle_mask:$UIMM))]>;
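
// e.g. "vspltb $vD, $vB, 3" broadcasts byte element 3 of $vB into all
// sixteen byte lanes of $vD; vsplth/vspltw do the same at halfword and
// word granularity.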

def VSR   : VX1_Int< 708, "vsr"  , int_ppc_altivec_vsr>;
def VSRO  : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>;
def VSRAB : VX1_Int< 772, "vsrab", int_ppc_altivec_vsrab>;
def VSRAH : VX1_Int< 836, "vsrah", int_ppc_altivec_vsrah>;
def VSRAW : VX1_Int< 900, "vsraw", int_ppc_altivec_vsraw>;
def VSRB  : VX1_Int< 516, "vsrb" , int_ppc_altivec_vsrb>;
def VSRH  : VX1_Int< 580, "vsrh" , int_ppc_altivec_vsrh>;
def VSRW  : VX1_Int< 644, "vsrw" , int_ppc_altivec_vsrw>;

def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
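
// SIMM is a 5-bit signed immediate, so these materialize any splat of a
// value in [-16, 15] without a load; e.g. "vspltisw $vD, -16" sets each of
// the four words of $vD to -16.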

// Vector pack.
def VPKPX   : VX1_Int<782, "vpkpx", int_ppc_altivec_vpkpx>;
def VPKSHSS : VX1_Int<398, "vpkshss", int_ppc_altivec_vpkshss>;
def VPKSHUS : VX1_Int<270, "vpkshus", int_ppc_altivec_vpkshus>;
def VPKSWSS : VX1_Int<462, "vpkswss", int_ppc_altivec_vpkswss>;
def VPKSWUS : VX1_Int<334, "vpkswus", int_ppc_altivec_vpkswus>;
def VPKUHUM : VXForm_1<14, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuhum $vD, $vA, $vB", VecFP,
                       []>;
def VPKUHUS : VX1_Int<142, "vpkuhus", int_ppc_altivec_vpkuhus>;
def VPKUWUM : VXForm_1<78, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuwum $vD, $vA, $vB", VecFP,
                       []>;
def VPKUWUS : VX1_Int<206, "vpkuwus", int_ppc_altivec_vpkuwus>;

// Vector unpack.
def VUPKHPX : VX2_Int<846, "vupkhpx", int_ppc_altivec_vupkhpx>;
def VUPKHSB : VX2_Int<526, "vupkhsb", int_ppc_altivec_vupkhsb>;
def VUPKHSH : VX2_Int<590, "vupkhsh", int_ppc_altivec_vupkhsh>;
def VUPKLPX : VX2_Int<974, "vupklpx", int_ppc_altivec_vupklpx>;
def VUPKLSB : VX2_Int<654, "vupklsb", int_ppc_altivec_vupklsb>;
def VUPKLSH : VX2_Int<718, "vupklsh", int_ppc_altivec_vupklsh>;

// Altivec Comparisons.

class VCMP<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
              [(set VRRC:$vD, (Ty (PPCvcmp VRRC:$vA, VRRC:$vB, xo)))]>;
class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
              [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]> {
  let Defs = [CR6];
  let RC = 1;
}
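
// The xo opcode is threaded into the PPCvcmp/PPCvcmp_o nodes so the selected
// DAG node records which comparison it performs; the record (".") forms also
// write a result summary to CR6, as the Defs above indicate.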

// f32 element comparisons.
def VCMPBFP   : VCMP <966, "vcmpbfp $vD, $vA, $vB"  , v4f32>;
def VCMPBFPo  : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
def VCMPEQFP  : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
def VCMPGEFP  : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
def VCMPGTFP  : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;

// i8 element comparisons.
def VCMPEQUB  : VCMP <  6, "vcmpequb $vD, $vA, $vB" , v16i8>;
def VCMPEQUBo : VCMPo<  6, "vcmpequb. $vD, $vA, $vB", v16i8>;
def VCMPGTSB  : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
def VCMPGTSBo : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
def VCMPGTUB  : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
def VCMPGTUBo : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>;

// i16 element comparisons.
def VCMPEQUH  : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
def VCMPGTSH  : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
def VCMPGTUH  : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;

// i32 element comparisons.
def VCMPEQUW  : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>;
def VCMPEQUWo : VCMPo<134, "vcmpequw. $vD, $vA, $vB", v4i32>;
def VCMPGTSW  : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>;
def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
def VCMPGTUW  : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;

// The canonical way to zero a vector register: vxor it with itself.
def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}  // VALU Operations.

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//===----------------------------------------------------------------------===//

// Undef and zero vectors.
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;

// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>;

// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>;

// Bit conversions: a reinterpretation of the same register, so they are free.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;
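
// These patterns let a splat constant be materialized at whatever type the
// build_vector was produced; for instance a v8i16 whose halfwords are all 3
// selects to "vspltish $vD, 3".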

// Logical Operations.
def : Pat<(v16i8 (vnot VRRC:$vA)), (v16i8 (VNOR VRRC:$vA, VRRC:$vA))>;
def : Pat<(v8i16 (vnot VRRC:$vA)), (v8i16 (VNOR VRRC:$vA, VRRC:$vA))>;
def : Pat<(v4i32 (vnot VRRC:$vA)), (v4i32 (VNOR VRRC:$vA, VRRC:$vA))>;

// The v4i32 forms of and/or/xor are matched by the VAND/VOR/VXOR instruction
// patterns themselves; only the other integer types need explicit patterns.
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or  VRRC:$A, VRRC:$B)), (v16i8 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or  VRRC:$A, VRRC:$B)), (v8i16 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))), (v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))), (v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;

// A plain fmul is selected as vmaddfp with a zero addend.
def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;

// Fused multiply-add and multiply-subtract for packed float.  These nodes are
// kept separate from the fadd/fmul patterns above for operations that require
// the extra precision of the fused form, such as Newton-Raphson refinement
// (used by divide and sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v16i8 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;