1 //===- SystemZInstrInfo.td - SystemZ Instruction defs ---------*- tblgen-*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the SystemZ instructions in TableGen format.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // SystemZ Instruction Predicate Definitions.
// Predicate: instruction is only available on z10 and newer subtargets.
def IsZ10 : Predicate<"Subtarget.isZ10()">;
18 include "SystemZInstrFormats.td"
20 //===----------------------------------------------------------------------===//
22 //===----------------------------------------------------------------------===//
// Shorthand type-constraint helpers: pin SDNode operand OpNum to a fixed
// integer value type.
class SDTCisI8<int OpNum> : SDTCisVT<OpNum, i8>;
class SDTCisI16<int OpNum> : SDTCisVT<OpNum, i16>;
class SDTCisI32<int OpNum> : SDTCisVT<OpNum, i32>;
class SDTCisI64<int OpNum> : SDTCisVT<OpNum, i64>;
28 //===----------------------------------------------------------------------===//
30 //===----------------------------------------------------------------------===//
// Type profiles for the SystemZ-specific SelectionDAG nodes defined below.
// Call: no results, variable operand count, first operand is the callee ptr.
def SDT_SystemZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_SystemZCallSeqStart : SDCallSeqStart<[SDTCisI64<0>]>;
def SDT_SystemZCallSeqEnd : SDCallSeqEnd<[SDTCisI64<0>, SDTCisI64<1>]>;
// Compare: two same-typed operands, no value result (flags go to PSW).
def SDT_CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
// NOTE(review): the constraint lists of SDT_BrCond and SDT_SelectCC appear
// truncated in this copy (no closing "]>;") -- confirm against the original
// revision before editing.
def SDT_BrCond : SDTypeProfile<0, 2,
                 [SDTCisVT<0, OtherVT>,
def SDT_SelectCC : SDTypeProfile<1, 3,
                   [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
def SDT_Address : SDTypeProfile<1, 1,
                  [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
//===----------------------------------------------------------------------===//
// SystemZ Specific Node Definitions.
//===----------------------------------------------------------------------===//
// Return with an optional incoming flag (e.g. the return value in a register).
def SystemZretflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
                            [SDNPHasChain, SDNPOptInFlag]>;
// Direct/indirect call; produces a flag and optionally consumes one.
def SystemZcall : SDNode<"SystemZISD::CALL", SDT_SystemZCall,
                         [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
def SystemZcallseq_start :
                 SDNode<"ISD::CALLSEQ_START", SDT_SystemZCallSeqStart,
                        [SDNPHasChain, SDNPOutFlag]>;
def SystemZcallseq_end :
                 SDNode<"ISD::CALLSEQ_END", SDT_SystemZCallSeqEnd,
                        [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
// Signed and unsigned compares; result is only the output flag (PSW).
def SystemZcmp : SDNode<"SystemZISD::CMP", SDT_CmpTest, [SDNPOutFlag]>;
def SystemZucmp : SDNode<"SystemZISD::UCMP", SDT_CmpTest, [SDNPOutFlag]>;
// Conditional branch / select consume the flag produced by a compare above.
def SystemZbrcond : SDNode<"SystemZISD::BRCOND", SDT_BrCond,
                           [SDNPHasChain, SDNPInFlag]>;
def SystemZselect : SDNode<"SystemZISD::SELECT", SDT_SelectCC, [SDNPInFlag]>;
// Wrapper for PC-relative addresses (globals, constant pool, ...).
def SystemZpcrelwrapper : SDNode<"SystemZISD::PCRelativeWrapper", SDT_Address, []>;
65 include "SystemZOperands.td"
//===----------------------------------------------------------------------===//
// Call-frame setup/teardown markers and custom-inserted select pseudos.
// NOTE(review): the asm-string operand of each Pseudo below and the closing
// "}" of the usesCustomDAGSchedInserter block appear missing in this copy.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt),
                              [(SystemZcallseq_start timm:$amt)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                            [(SystemZcallseq_end timm:$amt1, timm:$amt2)]>;
// Select32/64 are expanded to a diamond of branches by a custom inserter
// (there is no native conditional-move on this subtarget level).
let usesCustomDAGSchedInserter = 1 in {
def Select32 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cc),
                      (SystemZselect GR32:$src1, GR32:$src2, imm:$cc))]>;
def Select64 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$cc),
                      (SystemZselect GR64:$src1, GR64:$src2, imm:$cc))]>;
//===----------------------------------------------------------------------===//
// Control Flow Instructions...
// FIXME: Provide proper encoding!
// NOTE(review): the asm-string line of each conditional branch and the
// closing braces of the let blocks appear missing in this copy.
let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
  // Return = branch to the link register %r14.
  def RET : Pseudo<(outs), (ins), "br\t%r14", [(SystemZretflag)]>;
let isBranch = 1, isTerminator = 1 in {
  def JMP : Pseudo<(outs), (ins brtarget:$dst), "j\t{$dst}", [(br bb:$dst)]>;
  // Conditional branches read the condition code in PSW set by a compare.
  let Uses = [PSW] in {
    def JE : Pseudo<(outs), (ins brtarget:$dst),
                    [(SystemZbrcond bb:$dst, SYSTEMZ_COND_E)]>;
    def JNE : Pseudo<(outs), (ins brtarget:$dst),
                     [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NE)]>;
    def JH : Pseudo<(outs), (ins brtarget:$dst),
                    [(SystemZbrcond bb:$dst, SYSTEMZ_COND_H)]>;
    def JL : Pseudo<(outs), (ins brtarget:$dst),
                    [(SystemZbrcond bb:$dst, SYSTEMZ_COND_L)]>;
    def JHE : Pseudo<(outs), (ins brtarget:$dst),
                     [(SystemZbrcond bb:$dst, SYSTEMZ_COND_HE)]>;
    def JLE : Pseudo<(outs), (ins brtarget:$dst),
                     [(SystemZbrcond bb:$dst, SYSTEMZ_COND_LE)]>;
//===----------------------------------------------------------------------===//
// Call Instructions...
// All calls clobber the non-callee saved registers (except R14 which we
// handle separately). Uses for argument registers are added manually.
// NOTE(review): the closing "}" of this let block appears missing in this copy.
let Defs = [R0D, R1D, R2D, R3D, R4D, R5D] in {
  // Direct call (immediate target) and indirect call (register target);
  // both save the return address in %r14 via brasl.
  def CALLi : Pseudo<(outs), (ins i64imm:$dst, variable_ops),
                     "brasl\t%r14, $dst", [(SystemZcall imm:$dst)]>;
  def CALLr : Pseudo<(outs), (ins ADDR64:$dst, variable_ops),
                     "brasl\t%r14, $dst", [(SystemZcall ADDR64:$dst)]>;
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions.
// Load-address: materialize an effective address into a register without
// touching memory, hence rematerializable.
let isReMaterializable = 1 in
// FIXME: Provide imm12 variant
// FIXME: Address should be halfword aligned...
def LA64r : Pseudo<(outs GR64:$dst), (ins laaddr:$src),
                   [(set GR64:$dst, laaddr:$src)]>;
// PC-relative load-address of a global (larl).
def LA64rm : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                    "larl\t{$dst, $src}",
                    (SystemZpcrelwrapper tglobaladdr:$src))]>;
let neverHasSideEffects = 1 in
def NOP : Pseudo<(outs), (ins), "# no-op", []>;
//===----------------------------------------------------------------------===//
// FIXME: Provide proper encoding!
// Register-to-register moves. The 128-bit and paired-64-bit moves expand to
// two moves of the even/odd subregisters.
// NOTE(review): asm-string lines of MOV32rr/MOV64rr and the first asm line of
// MOV128rr/MOV64rrP appear missing in this copy, as do the closing braces.
let neverHasSideEffects = 1 in {
  def MOV32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src),
  def MOV64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src),
  def MOV128rr : Pseudo<(outs GR128:$dst), (ins GR128:$src),
                        "\tlgr\t${dst:subreg_odd}, ${src:subreg_odd}\n"
                        "\tlgr\t${dst:subreg_even}, ${src:subreg_even}",
  def MOV64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src),
                        "\tlr\t${dst:subreg_odd}, ${src:subreg_odd}\n"
                        "\tlr\t${dst:subreg_even}, ${src:subreg_even}",
// 32->64-bit sign/zero extension in registers.
def MOVSX64rr32 : Pseudo<(outs GR64:$dst), (ins GR32:$src),
                         "lgfr\t{$dst, $src}",
                         [(set GR64:$dst, (sext GR32:$src))]>;
def MOVZX64rr32 : Pseudo<(outs GR64:$dst), (ins GR32:$src),
                         "llgfr\t{$dst, $src}",
                         [(set GR64:$dst, (zext GR32:$src))]>;
// FIXME: Provide proper encoding!
// Immediate materialization. The "ll*" forms load a 16/32-bit field into one
// slice of a 64-bit register and zero the rest (ll = "load logical";
// ll/lh/hl/hh select bits 0-15 / 16-31 / 32-47 / 48-63).
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
  // Sign-extended 16-bit immediates.
  def MOV32ri16 : Pseudo<(outs GR32:$dst), (ins s16imm:$src),
                         [(set GR32:$dst, immSExt16:$src)]>;
  def MOV64ri16 : Pseudo<(outs GR64:$dst), (ins s16imm64:$src),
                         "lghi\t{$dst, $src}",
                         [(set GR64:$dst, immSExt16:$src)]>;
  // 16-bit fields into specific halfwords of a 64-bit register.
  def MOV64rill16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                           "llill\t{$dst, $src}",
                           [(set GR64:$dst, i64ll16:$src)]>;
  def MOV64rilh16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                           "llilh\t{$dst, $src}",
                           [(set GR64:$dst, i64lh16:$src)]>;
  def MOV64rihl16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                           "llihl\t{$dst, $src}",
                           [(set GR64:$dst, i64hl16:$src)]>;
  def MOV64rihh16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                           "llihh\t{$dst, $src}",
                           [(set GR64:$dst, i64hh16:$src)]>;
  // 32-bit immediates: sign-extended, low-word logical, high-word logical.
  def MOV64ri32 : Pseudo<(outs GR64:$dst), (ins s32imm64:$src),
                         "lgfi\t{$dst, $src}",
                         [(set GR64:$dst, immSExt32:$src)]>;
  def MOV64rilo32 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                           "llilf\t{$dst, $src}",
                           [(set GR64:$dst, i64lo32:$src)]>;
  def MOV64rihi32 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
                           "llihf\t{$dst, $src}",
                           [(set GR64:$dst, i64hi32:$src)]>;
// Loads and stores (register-index-displacement addressing).
// NOTE(review): asm-string lines of several defs appear missing in this copy,
// and MOV16mi/MOV32mi16/MOV64mi16 end with "]>," -- the dropped continuation
// was presumably "Requires<[IsZ10]>;" (mvhhi/mvhi/mvghi are z10 insns); verify
// against the original revision.
let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
  def MOV32rm : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
                       [(set GR32:$dst, (load rriaddr:$src))]>;
  def MOV64rm : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                       [(set GR64:$dst, (load rriaddr:$src))]>;
def MOV32mr : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
                     [(store GR32:$src, rriaddr:$dst)]>;
def MOV64mr : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
                     [(store GR64:$src, rriaddr:$dst)]>;
// Store-immediate forms.
// FIXME: displacements here are really 12 bit, not 20!
def MOV8mi : Pseudo<(outs), (ins riaddr:$dst, i32i8imm:$src),
                    "mviy\t{$dst, $src}",
                    [(truncstorei8 (i32 i32immSExt8:$src), riaddr:$dst)]>;
def MOV16mi : Pseudo<(outs), (ins riaddr:$dst, s16imm:$src),
                     "mvhhi\t{$dst, $src}",
                     [(truncstorei16 (i32 i32immSExt16:$src), riaddr:$dst)]>,
def MOV32mi16 : Pseudo<(outs), (ins riaddr:$dst, s32imm:$src),
                       "mvhi\t{$dst, $src}",
                       [(store (i32 immSExt16:$src), riaddr:$dst)]>,
def MOV64mi16 : Pseudo<(outs), (ins riaddr:$dst, s32imm64:$src),
                       "mvghi\t{$dst, $src}",
                       [(store (i64 immSExt16:$src), riaddr:$dst)]>,
// In-register sign extension from i8/i16 (sext_inreg).
def MOVSX32rr8 : Pseudo<(outs GR32:$dst), (ins GR32:$src),
                        [(set GR32:$dst, (sext_inreg GR32:$src, i8))]>;
def MOVSX64rr8 : Pseudo<(outs GR64:$dst), (ins GR64:$src),
                        "lgbr\t{$dst, $src}",
                        [(set GR64:$dst, (sext_inreg GR64:$src, i8))]>;
def MOVSX32rr16 : Pseudo<(outs GR32:$dst), (ins GR32:$src),
                         [(set GR32:$dst, (sext_inreg GR32:$src, i16))]>;
def MOVSX64rr16 : Pseudo<(outs GR64:$dst), (ins GR64:$src),
                         "lghr\t{$dst, $src}",
                         [(set GR64:$dst, (sext_inreg GR64:$src, i16))]>;
// Sign-extending loads.
def MOVSX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
                        [(set GR32:$dst, (sextloadi32i8 rriaddr:$src))]>;
def MOVSX32rm16 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
                         [(set GR32:$dst, (sextloadi32i16 rriaddr:$src))]>;
def MOVSX64rm8 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                        [(set GR64:$dst, (sextloadi64i8 rriaddr:$src))]>;
def MOVSX64rm16 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                         [(set GR64:$dst, (sextloadi64i16 rriaddr:$src))]>;
def MOVSX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                         [(set GR64:$dst, (sextloadi64i32 rriaddr:$src))]>;
// Zero-extending loads.
def MOVZX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
                        [(set GR32:$dst, (zextloadi32i8 rriaddr:$src))]>;
def MOVZX32rm16 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
                         [(set GR32:$dst, (zextloadi32i16 rriaddr:$src))]>;
def MOVZX64rm8 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                        "llgc\t{$dst, $src}",
                        [(set GR64:$dst, (zextloadi64i8 rriaddr:$src))]>;
def MOVZX64rm16 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                         "llgh\t{$dst, $src}",
                         [(set GR64:$dst, (zextloadi64i16 rriaddr:$src))]>;
def MOVZX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
                         "llgf\t{$dst, $src}",
                         [(set GR64:$dst, (zextloadi64i32 rriaddr:$src))]>;
// Truncating stores.
// FIXME: Implement 12-bit displacement stuff someday
def MOV32m8r : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
                      "stcy\t{$src, $dst}",
                      [(truncstorei8 GR32:$src, rriaddr:$dst)]>;
def MOV32m16r : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
                       "sthy\t{$src, $dst}",
                       [(truncstorei16 GR32:$src, rriaddr:$dst)]>;
def MOV64m8r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
                      "stcy\t{$src, $dst}",
                      [(truncstorei8 GR64:$src, rriaddr:$dst)]>;
def MOV64m16r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
                       "sthy\t{$src, $dst}",
                       [(truncstorei16 GR64:$src, rriaddr:$dst)]>;
def MOV64m32r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
                       [(truncstorei32 GR64:$src, rriaddr:$dst)]>;
// multiple regs moves
// FIXME: should we use multiple arg nodes?
// These have no selection patterns; they are created by frame lowering /
// spill code for store-multiple and load-multiple.
def MOV32mrm : Pseudo<(outs), (ins riaddr:$dst, GR32:$from, GR32:$to),
                      "stmy\t{$from, $to, $dst}",
def MOV64mrm : Pseudo<(outs), (ins riaddr:$dst, GR64:$from, GR64:$to),
                      "stmg\t{$from, $to, $dst}",
def MOV32rmm : Pseudo<(outs GR32:$from, GR32:$to), (ins riaddr:$dst),
                      "lmy\t{$from, $to, $dst}",
def MOV64rmm : Pseudo<(outs GR64:$from, GR64:$to), (ins riaddr:$dst),
                      "lmg\t{$from, $to, $dst}",
//===----------------------------------------------------------------------===//
// Arithmetic Instructions
// Two-address: $dst is tied to $src1. All of these set the condition code,
// hence Defs = [PSW].
// NOTE(review): several asm-string lines and the trailing
// "(implicit PSW)]>;" continuations appear missing in this copy.
let isTwoAddress = 1 in {
let Defs = [PSW] in {
let isCommutable = 1 in { // X = ADD Y, Z == X = ADD Z, Y
// FIXME: Provide proper encoding!
def ADD32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                     [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
def ADD64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                     "agr\t{$dst, $src2}",
                     [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
// FIXME: Provide proper encoding!
def ADD32ri16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s16imm:$src2),
                       "ahi\t{$dst, $src2}",
                       [(set GR32:$dst, (add GR32:$src1, immSExt16:$src2)),
def ADD32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
                     "afi\t{$dst, $src2}",
                     [(set GR32:$dst, (add GR32:$src1, imm:$src2)),
def ADD64ri16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s16imm64:$src2),
                       "aghi\t{$dst, $src2}",
                       [(set GR64:$dst, (add GR64:$src1, immSExt16:$src2)),
def ADD64ri32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
                       "agfi\t{$dst, $src2}",
                       [(set GR64:$dst, (add GR64:$src1, immSExt32:$src2)),
// Bitwise AND. Immediate forms operate on a 16- or 32-bit slice of the
// register (nill/nilh/nihl/nihh/nilf/nihf); the i*16c / i*32c predicates
// match masks that are all-ones outside the addressed slice.
let isCommutable = 1 in { // X = AND Y, Z == X = AND Z, Y
// FIXME: Provide proper encoding!
def AND32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                     [(set GR32:$dst, (and GR32:$src1, GR32:$src2))]>;
def AND64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                     "ngr\t{$dst, $src2}",
                     [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
// FIXME: Provide proper encoding!
// FIXME: Compute masked bits properly!
def AND32rill16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                         "nill\t{$dst, $src2}",
                         [(set GR32:$dst, (and GR32:$src1, i32ll16c:$src2))]>;
def AND64rill16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                         "nill\t{$dst, $src2}",
                         [(set GR64:$dst, (and GR64:$src1, i64ll16c:$src2))]>;
def AND32rilh16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                         "nilh\t{$dst, $src2}",
                         [(set GR32:$dst, (and GR32:$src1, i32lh16c:$src2))]>;
def AND64rilh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                         "nilh\t{$dst, $src2}",
                         [(set GR64:$dst, (and GR64:$src1, i64lh16c:$src2))]>;
def AND64rihl16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                         "nihl\t{$dst, $src2}",
                         [(set GR64:$dst, (and GR64:$src1, i64hl16c:$src2))]>;
def AND64rihh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                         "nihh\t{$dst, $src2}",
                         [(set GR64:$dst, (and GR64:$src1, i64hh16c:$src2))]>;
def AND32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                     "nilf\t{$dst, $src2}",
                     [(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
def AND64rilo32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                         "nilf\t{$dst, $src2}",
                         [(set GR64:$dst, (and GR64:$src1, i64lo32c:$src2))]>;
def AND64rihi32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                         "nihf\t{$dst, $src2}",
                         [(set GR64:$dst, (and GR64:$src1, i64hi32c:$src2))]>;
// Bitwise OR. Immediate forms set bits in one 16- or 32-bit slice of the
// register (oill/oilh/oihl/oihh/oilf/oihf); the i*16 / i*32 predicates match
// immediates whose set bits all lie inside the addressed slice.
let isCommutable = 1 in { // X = OR Y, Z == X = OR Z, Y
// FIXME: Provide proper encoding!
def OR32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    [(set GR32:$dst, (or GR32:$src1, GR32:$src2))]>;
def OR64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "ogr\t{$dst, $src2}",
                    [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR32ri16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                      "oill\t{$dst, $src2}",
                      [(set GR32:$dst, (or GR32:$src1, i32ll16:$src2))]>;
def OR32ri16h : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                       "oilh\t{$dst, $src2}",
                       [(set GR32:$dst, (or GR32:$src1, i32lh16:$src2))]>;
def OR32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "oilf\t{$dst, $src2}",
                    [(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
def OR64rill16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                        "oill\t{$dst, $src2}",
                        [(set GR64:$dst, (or GR64:$src1, i64ll16:$src2))]>;
def OR64rilh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                        "oilh\t{$dst, $src2}",
                        [(set GR64:$dst, (or GR64:$src1, i64lh16:$src2))]>;
def OR64rihl16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                        "oihl\t{$dst, $src2}",
                        [(set GR64:$dst, (or GR64:$src1, i64hl16:$src2))]>;
def OR64rihh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                        "oihh\t{$dst, $src2}",
                        [(set GR64:$dst, (or GR64:$src1, i64hh16:$src2))]>;
def OR64rilo32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                        "oilf\t{$dst, $src2}",
                        [(set GR64:$dst, (or GR64:$src1, i64lo32:$src2))]>;
def OR64rihi32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
                        "oihf\t{$dst, $src2}",
                        [(set GR64:$dst, (or GR64:$src1, i64hi32:$src2))]>;
// Subtract (not commutative) and XOR.
// FIXME: Provide proper encoding!
def SUB32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                     [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
def SUB64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                     "sgr\t{$dst, $src2}",
                     [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
let isCommutable = 1 in { // X = XOR Y, Z == X = XOR Z, Y
// FIXME: Provide proper encoding!
def XOR32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                     [(set GR32:$dst, (xor GR32:$src1, GR32:$src2))]>;
def XOR64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                     "xgr\t{$dst, $src2}",
                     [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                     "xilf\t{$dst, $src2}",
                     [(set GR32:$dst, (xor GR32:$src1, imm:$src2))]>;
// Multiply and divide/remainder. The *P / 128-bit variants work on even/odd
// register pairs (GR64P/GR128); they have no selection patterns here and are
// matched via the Pat<> rules at the bottom of the file.
// NOTE(review): several pattern/Requires continuation lines appear missing in
// this copy (defs ending in "," or an asm string with no "]>;").
let isCommutable = 1 in { // X = MUL Y, Z == X = MUL Z, Y
def MUL32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                     "msr\t{$dst, $src2}",
                     [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>;
def MUL64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                     "msgr\t{$dst, $src2}",
                     [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>;
def MUL64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
def UMUL64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
                       "mlr\t{$dst, $src2}",
def UMUL128rrP : Pseudo<(outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
                        "mlgr\t{$dst, $src2}",
def MUL32ri16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s16imm:$src2),
                       "mhi\t{$dst, $src2}",
                       [(set GR32:$dst, (mul GR32:$src1, i32immSExt16:$src2))]>;
def MUL64ri16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s16imm64:$src2),
                       "mghi\t{$dst, $src2}",
                       [(set GR64:$dst, (mul GR64:$src1, immSExt16:$src2))]>;
def MUL32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
                     "msfi\t{$dst, $src2}",
                     [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>,
def MUL64ri32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
                       "msgfi\t{$dst, $src2}",
                       [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>,
def MUL32rm : Pseudo<(outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
                     "msy\t{$dst, $src2}",
                     [(set GR32:$dst, (mul GR32:$src1, (load rriaddr:$src2)))]>;
def MUL64rm : Pseudo<(outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
                     "msg\t{$dst, $src2}",
                     [(set GR64:$dst, (mul GR64:$src1, (load rriaddr:$src2)))]>;
def MULSX64rr32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR32:$src2),
                         "msgfr\t{$dst, $src2}",
                         [(set GR64:$dst, (mul GR64:$src1, (sext GR32:$src2)))]>;
// Combined divide/remainder on register pairs: quotient in the odd register,
// remainder in the even one (matched by the sdiv/udiv/srem/urem Pat<> rules).
def SDIVREM64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
def SDIVREM128rrP : Pseudo<(outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
                           "dsgr\t{$dst, $src2}",
def UDIVREM64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
                          "dlr\t{$dst, $src2}",
def UDIVREM128rrP : Pseudo<(outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
                           "dlgr\t{$dst, $src2}",
} // isTwoAddress = 1
//===----------------------------------------------------------------------===//
// Shifts. The 32-bit forms are two-address; the 64-bit forms take a separate
// source register. 64-bit shift amounts come either from an address-style
// operand (riaddr, base+displacement) or an immediate.
// NOTE(review): SRLA64ri is presumably a typo for SRL64ri (compare SHL64ri /
// SRA64ri) -- confirm against C++ users before renaming.
let isTwoAddress = 1 in
def SRL32rri : Pseudo<(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
                      [(set GR32:$dst, (srl GR32:$src, riaddr32:$amt))]>;
def SRL64rri : Pseudo<(outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
                      "srlg\t{$dst, $src, $amt}",
                      [(set GR64:$dst, (srl GR64:$src, (i32 (trunc riaddr:$amt))))]>;
def SRLA64ri : Pseudo<(outs GR64:$dst), (ins GR64:$src, i32imm:$amt),
                      "srlg\t{$dst, $src, $amt}",
                      [(set GR64:$dst, (srl GR64:$src, (i32 imm:$amt)))]>;
let isTwoAddress = 1 in
def SHL32rri : Pseudo<(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
                      [(set GR32:$dst, (shl GR32:$src, riaddr32:$amt))]>;
def SHL64rri : Pseudo<(outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
                      "sllg\t{$dst, $src, $amt}",
                      [(set GR64:$dst, (shl GR64:$src, (i32 (trunc riaddr:$amt))))]>;
def SHL64ri : Pseudo<(outs GR64:$dst), (ins GR64:$src, i32imm:$amt),
                     "sllg\t{$dst, $src, $amt}",
                     [(set GR64:$dst, (shl GR64:$src, (i32 imm:$amt)))]>;
// Arithmetic shifts set the condition code, hence Defs = [PSW].
let Defs = [PSW] in {
let isTwoAddress = 1 in
def SRA32rri : Pseudo<(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
                      [(set GR32:$dst, (sra GR32:$src, riaddr32:$amt)),
def SRA64rri : Pseudo<(outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
                      "srag\t{$dst, $src, $amt}",
                      [(set GR64:$dst, (sra GR64:$src, (i32 (trunc riaddr:$amt)))),
def SRA64ri : Pseudo<(outs GR64:$dst), (ins GR64:$src, i32imm:$amt),
                     "srag\t{$dst, $src, $amt}",
                     [(set GR64:$dst, (sra GR64:$src, (i32 imm:$amt))),
//===----------------------------------------------------------------------===//
// Test instructions (like AND but do not produce any result).
// Integer comparisons: signed (CMP*, matching SystemZcmp) and unsigned
// (UCMP*, matching SystemZucmp). All set the condition code in PSW.
// NOTE(review): CMPSX64rr32/CMPSX64rm32 emit SIGNED compares (cgfr/cgf) yet
// their patterns match SystemZucmp; this looks like it should be SystemZcmp
// (the operand is sign-extended). Verify before changing -- defs are
// truncated in this copy.
let Defs = [PSW] in {
def CMP32rr : Pseudo<(outs), (ins GR32:$src1, GR32:$src2),
                     [(SystemZcmp GR32:$src1, GR32:$src2), (implicit PSW)]>;
def CMP64rr : Pseudo<(outs), (ins GR64:$src1, GR64:$src2),
                     [(SystemZcmp GR64:$src1, GR64:$src2), (implicit PSW)]>;
def CMP32ri : Pseudo<(outs), (ins GR32:$src1, s32imm:$src2),
                     [(SystemZcmp GR32:$src1, imm:$src2), (implicit PSW)]>;
def CMP64ri32 : Pseudo<(outs), (ins GR64:$src1, s32imm64:$src2),
                       "cgfi\t$src1, $src2",
                       [(SystemZcmp GR64:$src1, i64immSExt32:$src2),
def CMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
                     [(SystemZcmp GR32:$src1, (load rriaddr:$src2)),
def CMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
                     [(SystemZcmp GR64:$src1, (load rriaddr:$src2)),
def UCMP32rr : Pseudo<(outs), (ins GR32:$src1, GR32:$src2),
                      [(SystemZucmp GR32:$src1, GR32:$src2), (implicit PSW)]>;
def UCMP64rr : Pseudo<(outs), (ins GR64:$src1, GR64:$src2),
                      "clgr\t$src1, $src2",
                      [(SystemZucmp GR64:$src1, GR64:$src2), (implicit PSW)]>;
def UCMP32ri : Pseudo<(outs), (ins GR32:$src1, i32imm:$src2),
                      "clfi\t$src1, $src2",
                      [(SystemZucmp GR32:$src1, imm:$src2), (implicit PSW)]>;
def UCMP64ri32 : Pseudo<(outs), (ins GR64:$src1, i64i32imm:$src2),
                        "clgfi\t$src1, $src2",
                        [(SystemZucmp GR64:$src1, i64immZExt32:$src2),
def UCMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
                      [(SystemZucmp GR32:$src1, (load rriaddr:$src2)),
def UCMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
                      [(SystemZucmp GR64:$src1, (load rriaddr:$src2)),
def CMPSX64rr32 : Pseudo<(outs), (ins GR64:$src1, GR32:$src2),
                         "cgfr\t$src1, $src2",
                         [(SystemZucmp GR64:$src1, (sext GR32:$src2)),
def UCMPZX64rr32 : Pseudo<(outs), (ins GR64:$src1, GR32:$src2),
                          "clgfr\t$src1, $src2",
                          [(SystemZucmp GR64:$src1, (zext GR32:$src2)),
def CMPSX64rm32 : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
                         [(SystemZucmp GR64:$src1, (sextloadi64i32 rriaddr:$src2)),
def UCMPZX64rm32 : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
                          "clgf\t$src1, $src2",
                          [(SystemZucmp GR64:$src1, (zextloadi64i32 rriaddr:$src2)),
// FIXME: Add other crazy ucmp forms
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns.
//===----------------------------------------------------------------------===//
// anyext: just place the 32-bit value into the low subregister of a 64-bit reg.
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_32bit)>;
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// FIXME: use add/sub tricks with 32768/-32768
// trunc: take the low subregister.
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, subreg_32bit)>;
// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
// anyext loads: reuse the zero-extending load instructions.
def : Pat<(extloadi32i8 rriaddr:$src), (MOVZX32rm8 rriaddr:$src)>;
def : Pat<(extloadi32i16 rriaddr:$src), (MOVZX32rm16 rriaddr:$src)>;
def : Pat<(extloadi64i8 rriaddr:$src), (MOVZX64rm8 rriaddr:$src)>;
def : Pat<(extloadi64i16 rriaddr:$src), (MOVZX64rm16 rriaddr:$src)>;
def : Pat<(extloadi64i32 rriaddr:$src), (MOVZX64rm32 rriaddr:$src)>;
// Direct calls to globals/external symbols.
def : Pat<(SystemZcall (i64 tglobaladdr:$dst)),
          (CALLi tglobaladdr:$dst)>;
def : Pat<(SystemZcall (i64 texternalsym:$dst)),
          (CALLi texternalsym:$dst)>;
// mulh*: widen into the odd half of a register pair, multiply, and extract
// the high half.
// NOTE(review): each pattern below appears truncated in this copy -- the
// second INSERT_SUBREG operand and the final EXTRACT_SUBREG index lines are
// missing. Confirm against the original revision before editing.
def : Pat<(mulhs GR32:$src1, GR32:$src2),
          (EXTRACT_SUBREG (MUL64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                                   GR32:$src1, subreg_odd),
def : Pat<(mulhu GR32:$src1, GR32:$src2),
          (EXTRACT_SUBREG (UMUL64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                                    GR32:$src1, subreg_odd),
def : Pat<(mulhu GR64:$src1, GR64:$src2),
          (EXTRACT_SUBREG (UMUL128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
                                                     GR64:$src1, subreg_odd),
// div: quotient lands in the odd subregister of the pair.
// FIXME: Add memory versions
def : Pat<(sdiv GR32:$src1, GR32:$src2),
          (EXTRACT_SUBREG (SDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                                       GR32:$src1, subreg_odd),
def : Pat<(sdiv GR64:$src1, GR64:$src2),
          (EXTRACT_SUBREG (SDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
                                                        GR64:$src1, subreg_odd),
def : Pat<(udiv GR32:$src1, GR32:$src2),
          (EXTRACT_SUBREG (UDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                                       GR32:$src1, subreg_odd),
def : Pat<(udiv GR64:$src1, GR64:$src2),
          (EXTRACT_SUBREG (UDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
                                                        GR64:$src1, subreg_odd),
// rem: remainder lands in the even subregister of the pair.
// FIXME: Add memory versions
def : Pat<(srem GR32:$src1, GR32:$src2),
          (EXTRACT_SUBREG (SDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                                       GR32:$src1, subreg_odd),
def : Pat<(srem GR64:$src1, GR64:$src2),
          (EXTRACT_SUBREG (SDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
                                                        GR64:$src1, subreg_odd),
def : Pat<(urem GR32:$src1, GR32:$src2),
          (EXTRACT_SUBREG (UDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                                       GR32:$src1, subreg_odd),
def : Pat<(urem GR64:$src1, GR64:$src2),
          (EXTRACT_SUBREG (UDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
                                                        GR64:$src1, subreg_odd),
// Arbitrary 32-bit immediates: materialize as 64-bit and take the low half.
def : Pat<(i32 imm:$src),
          (EXTRACT_SUBREG (MOV64ri32 (i64 imm:$src)), subreg_32bit)>;
767 (EXTRACT_SUBREG (MOV64ri32 (i64 imm:$src)), subreg_32bit)>;