1 //===-- SystemZOperators.td - SystemZ-specific operators ------*- tblgen-*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
// Type profiles: SDTypeProfile<NumResults, NumOperands, [constraints]>
// declares the result/operand counts and type constraints for the custom
// SystemZ SelectionDAG nodes defined further down in this file.
// NOTE(review): several of the multi-line profiles below appear to have
// lost their continuation lines in this chunk (e.g. SDT_CallSeqEnd,
// SDT_ZSelectCCMask, SDT_ZAtomicLoadBinaryW) — verify against upstream.
// Call-sequence markers take a single i64 stack-adjustment operand.
13 def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i64>]>;
14 def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>,
// Calls take a pointer callee plus a variable number of argument operands.
16 def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
// Comparisons: two operands of the same type, no results (CC is glued).
17 def SDT_ZCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
// Conditional branch: a CC-mask immediate and a basic-block operand.
18 def SDT_ZBRCCMask : SDTypeProfile<0, 2,
20 SDTCisVT<1, OtherVT>]>;
// Conditional select: result plus three operands (two values and a mask).
21 def SDT_ZSelectCCMask : SDTypeProfile<1, 3,
// PC-relative address wrapper: one result from one operand.
25 def SDT_ZWrapPtr : SDTypeProfile<1, 1,
// Dynamic stack allocation adjustment: produces a single i64.
28 def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
29 def SDT_ZExtractAccess : SDTypeProfile<1, 1,
// 128-bit GR128 binary operations: untyped (register-pair) result with a
// 32-bit or 64-bit second operand respectively.
32 def SDT_ZGR128Binary32 : SDTypeProfile<1, 2,
33 [SDTCisVT<0, untyped>,
36 def SDT_ZGR128Binary64 : SDTypeProfile<1, 2,
37 [SDTCisVT<0, untyped>,
// Word-sized atomic operations: extra operands presumably carry the
// aligned address, bit shifts and negated shifts — TODO confirm upstream.
40 def SDT_ZAtomicLoadBinaryW : SDTypeProfile<1, 5,
47 def SDT_ZAtomicCmpSwapW : SDTypeProfile<1, 6,
56 //===----------------------------------------------------------------------===//
58 //===----------------------------------------------------------------------===//
60 // These are target-independent nodes, but have target-specific formats.
// These are target-independent nodes given SystemZ-specific type profiles.
// NOTE(review): some node-flag lists below are cut short in this chunk
// (callseq_end, z_call, z_select_ccmask, z_extract_access) — verify the
// missing continuation lines against upstream.
61 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
62 [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
63 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
64 [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue,
67 // Nodes for SystemZISD::*. See SystemZISelLowering.h for more details.
// Function return: chained, may consume glue, variadic return operands.
68 def z_retflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
69 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
70 def z_call : SDNode<"SystemZISD::CALL", SDT_ZCall,
71 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
// Wrapper for PC-relative symbol addresses; no chain or glue needed.
73 def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
// Signed and unsigned compares; the condition code is produced as glue.
74 def z_cmp : SDNode<"SystemZISD::CMP", SDT_ZCmp, [SDNPOutGlue]>;
75 def z_ucmp : SDNode<"SystemZISD::UCMP", SDT_ZCmp, [SDNPOutGlue]>;
// Branch/select on a CC mask; both consume the glued CC from a compare.
76 def z_br_ccmask : SDNode<"SystemZISD::BR_CCMASK", SDT_ZBRCCMask,
77 [SDNPHasChain, SDNPInGlue]>;
78 def z_select_ccmask : SDNode<"SystemZISD::SELECT_CCMASK", SDT_ZSelectCCMask,
80 def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>;
81 def z_extract_access : SDNode<"SystemZISD::EXTRACT_ACCESS",
// 64x64->128 multiply and 32/64-bit divide-and-remainder operations that
// produce their results in an untyped 128-bit register pair.
83 def z_umul_lohi64 : SDNode<"SystemZISD::UMUL_LOHI64", SDT_ZGR128Binary64>;
84 def z_sdivrem32 : SDNode<"SystemZISD::SDIVREM32", SDT_ZGR128Binary32>;
85 def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>;
86 def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>;
87 def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
// Helper class for word-sized (sub-32-bit) atomic operation nodes.
// "name" is the SystemZISD::* suffix; "profile" defaults to the binary
// load-op profile and is overridden only for compare-and-swap below.
// All such nodes are chained memory operations that both load and store
// and carry a MachineMemOperand.
89 class AtomicWOp<string name, SDTypeProfile profile = SDT_ZAtomicLoadBinaryW>
90 : SDNode<"SystemZISD::"##name, profile,
91 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// One node per word-sized atomic RMW operation.
93 def z_atomic_swapw : AtomicWOp<"ATOMIC_SWAPW">;
94 def z_atomic_loadw_add : AtomicWOp<"ATOMIC_LOADW_ADD">;
95 def z_atomic_loadw_sub : AtomicWOp<"ATOMIC_LOADW_SUB">;
96 def z_atomic_loadw_and : AtomicWOp<"ATOMIC_LOADW_AND">;
97 def z_atomic_loadw_or : AtomicWOp<"ATOMIC_LOADW_OR">;
98 def z_atomic_loadw_xor : AtomicWOp<"ATOMIC_LOADW_XOR">;
99 def z_atomic_loadw_nand : AtomicWOp<"ATOMIC_LOADW_NAND">;
100 def z_atomic_loadw_min : AtomicWOp<"ATOMIC_LOADW_MIN">;
101 def z_atomic_loadw_max : AtomicWOp<"ATOMIC_LOADW_MAX">;
102 def z_atomic_loadw_umin : AtomicWOp<"ATOMIC_LOADW_UMIN">;
103 def z_atomic_loadw_umax : AtomicWOp<"ATOMIC_LOADW_UMAX">;
// Compare-and-swap takes an extra operand, hence the wider profile.
104 def z_atomic_cmp_swapw : AtomicWOp<"ATOMIC_CMP_SWAPW", SDT_ZAtomicCmpSwapW>;
106 //===----------------------------------------------------------------------===//
108 //===----------------------------------------------------------------------===//
110 // Register sign-extend operations. Sub-32-bit values are represented as i32s.
110 // Register sign-extend operations. Sub-32-bit values are represented as i32s.
// sext8/sext16 extend from an in-register i8/i16; sext32 widens i32 to i64.
111 def sext8 : PatFrag<(ops node:$src), (sext_inreg node:$src, i8)>;
112 def sext16 : PatFrag<(ops node:$src), (sext_inreg node:$src, i16)>;
113 def sext32 : PatFrag<(ops node:$src), (sext (i32 node:$src))>;
115 // Register zero-extend operations. Sub-32-bit values are represented as i32s.
// Zero-extension of sub-32-bit values is just an AND with the low-bit mask.
116 def zext8 : PatFrag<(ops node:$src), (and node:$src, 0xff)>;
117 def zext16 : PatFrag<(ops node:$src), (and node:$src, 0xffff)>;
118 def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>;
120 // Typed floating-point loads.
121 def loadf32 : PatFrag<(ops node:$src), (f32 (load node:$src))>;
122 def loadf64 : PatFrag<(ops node:$src), (f64 (load node:$src))>;
124 // Extending loads in which the extension type doesn't matter.
// Matches any unindexed load that performs some extension (sext, zext or
// anyext); the i8/i16/i32 variants additionally constrain the memory type.
// NOTE(review): the "}]>;" closers of these PatFrag code fragments are
// missing from this chunk — verify against upstream.
125 def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
126 return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;
128 def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
129 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
131 def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
132 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
134 def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
135 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
// Wraps a load fragment so it only matches when the access is at least
// naturally aligned (alignment >= the store size of the memory type).
139 class AlignedLoad<SDPatternOperator load>
140 : PatFrag<(ops node:$addr), (load node:$addr), [{
141 LoadSDNode *Load = cast<LoadSDNode>(N);
142 return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
// Aligned variants of plain and extending loads.
144 def aligned_load : AlignedLoad<load>;
145 def aligned_sextloadi16 : AlignedLoad<sextloadi16>;
146 def aligned_sextloadi32 : AlignedLoad<sextloadi32>;
147 def aligned_zextloadi16 : AlignedLoad<zextloadi16>;
148 def aligned_zextloadi32 : AlignedLoad<zextloadi32>;
// Same idea for stores: match only naturally-aligned accesses.
151 class AlignedStore<SDPatternOperator store>
152 : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
153 StoreSDNode *Store = cast<StoreSDNode>(N);
154 return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
156 def aligned_store : AlignedStore<store>;
157 def aligned_truncstorei16 : AlignedStore<truncstorei16>;
158 def aligned_truncstorei32 : AlignedStore<truncstorei32>;
160 // Non-volatile loads. Used for instructions that might access the storage
161 // location multiple times.
// Wraps a load fragment so it only matches non-volatile accesses.
162 class NonvolatileLoad<SDPatternOperator load>
163 : PatFrag<(ops node:$addr), (load node:$addr), [{
164 LoadSDNode *Load = cast<LoadSDNode>(N);
165 return !Load->isVolatile();
// Non-volatile variants of plain and any-extending loads.
167 def nonvolatile_load : NonvolatileLoad<load>;
168 def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>;
169 def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>;
170 def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
172 // Non-volatile stores.
173 class NonvolatileStore<SDPatternOperator store>
174 : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
175 StoreSDNode *Store = cast<StoreSDNode>(N);
176 return !Store->isVolatile();
178 def nonvolatile_store : NonvolatileStore<store>;
179 def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>;
180 def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>;
181 def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>;
// Insertions: clear a field of $src1 with an AND mask, then OR in $src2.
// inserti8 replaces the low byte (-256 == ~0xff in two's complement).
184 def inserti8 : PatFrag<(ops node:$src1, node:$src2),
185 (or (and node:$src1, -256), node:$src2)>;
// The ll/lh/hl/hh variants replace one 16-bit quarter of a 64-bit value
// (the mask clears exactly that halfword); lf/hf replace a 32-bit half.
186 def insertll : PatFrag<(ops node:$src1, node:$src2),
187 (or (and node:$src1, 0xffffffffffff0000), node:$src2)>;
188 def insertlh : PatFrag<(ops node:$src1, node:$src2),
189 (or (and node:$src1, 0xffffffff0000ffff), node:$src2)>;
190 def inserthl : PatFrag<(ops node:$src1, node:$src2),
191 (or (and node:$src1, 0xffff0000ffffffff), node:$src2)>;
192 def inserthh : PatFrag<(ops node:$src1, node:$src2),
193 (or (and node:$src1, 0x0000ffffffffffff), node:$src2)>;
194 def insertlf : PatFrag<(ops node:$src1, node:$src2),
195 (or (and node:$src1, 0xffffffff00000000), node:$src2)>;
196 def inserthf : PatFrag<(ops node:$src1, node:$src2),
197 (or (and node:$src1, 0x00000000ffffffff), node:$src2)>;
199 // ORs that can be treated as insertions.
// Matches (or a, b) when the low 8 bits of operand 0 are known zero, so
// the OR simply inserts the low byte of operand 1.
// NOTE(review): the "}]>;" closers of both fragments are missing from
// this chunk — verify against upstream.
200 def or_as_inserti8 : PatFrag<(ops node:$src1, node:$src2),
201 (or node:$src1, node:$src2), [{
202 unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
203 return CurDAG->MaskedValueIsZero(N->getOperand(0),
204 APInt::getLowBitsSet(BitWidth, 8));
207 // ORs that can be treated as reversed insertions.
// Same check with the operands swapped: operand 1's low byte is zero.
208 def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
209 (or node:$src1, node:$src2), [{
210 unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
211 return CurDAG->MaskedValueIsZero(N->getOperand(1),
212 APInt::getLowBitsSet(BitWidth, 8));
215 // Fused multiply-add and multiply-subtract, but with the order of the
216 // operands matching SystemZ's MA and MS instructions.
// z_fma computes $src2 * $src3 + $src1 (addend first, as MA expects);
// z_fms negates the addend to get $src2 * $src3 - $src1.
217 def z_fma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
218 (fma node:$src2, node:$src3, node:$src1)>;
219 def z_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
220 (fma node:$src2, node:$src3, (fneg node:$src1))>;
222 // Floating-point negative absolute.
// fnabs x == -|x|.
223 def fnabs : PatFrag<(ops node:$ptr), (fneg (fabs node:$ptr))>;
225 // Create a unary operator that loads from memory and then performs
226 // the given operation on it.
// loadu<op, ld>: matches op(ld(addr)) — e.g. op applied to a loaded value.
227 class loadu<SDPatternOperator operator, SDPatternOperator load = load>
228 : PatFrag<(ops node:$addr), (operator (load node:$addr))>;
230 // Create a store operator that performs the given unary operation
231 // on the value before storing it.
// storeu<op, st>: matches st(op(value), addr) — op applied before storing.
232 class storeu<SDPatternOperator operator, SDPatternOperator store = store>
233 : PatFrag<(ops node:$value, node:$addr),
234 (store (operator node:$value), node:$addr)>;