1 //===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the SystemZTargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "SystemZISelLowering.h"
15 #include "SystemZCallingConv.h"
16 #include "SystemZConstantPoolValue.h"
17 #include "SystemZMachineFunctionInfo.h"
18 #include "SystemZTargetMachine.h"
19 #include "llvm/CodeGen/CallingConvLower.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"
namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  unsigned XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace
// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}
// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}
84 SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm,
85 const SystemZSubtarget &STI)
86 : TargetLowering(tm), Subtarget(STI) {
87 MVT PtrVT = getPointerTy();
89 // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
94 addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
95 addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
96 addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
97 addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
99 // Compute derived properties from the register classes
100 computeRegisterProperties(Subtarget.getRegisterInfo());
102 // Set up special registers.
103 setExceptionPointerRegister(SystemZ::R6D);
104 setExceptionSelectorRegister(SystemZ::R7D);
105 setStackPointerRegisterToSaveRestore(SystemZ::R15D);
107 // TODO: It may be better to default to latency-oriented scheduling, however
108 // LLVM's current latency-oriented scheduler can't handle physreg definitions
109 // such as SystemZ has with CC, so set this to the register-pressure
110 // scheduler, because it can.
111 setSchedulingPreference(Sched::RegPressure);
113 setBooleanContents(ZeroOrOneBooleanContent);
114 setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
116 // Instructions are strings of 2-byte aligned 2-byte values.
117 setMinFunctionAlignment(2);
119 // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
124 if (isTypeLegal(VT)) {
125 // Lower SET_CC into an IPM-based sequence.
126 setOperationAction(ISD::SETCC, VT, Custom);
128 // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
129 setOperationAction(ISD::SELECT, VT, Expand);
131 // Lower SELECT_CC and BR_CC into separate comparisons and branches.
132 setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect branch.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
141 // Expand BRCOND into a BR_CC (see above).
142 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
144 // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
149 if (isTypeLegal(VT)) {
150 // Expand individual DIV and REMs into DIVREMs.
151 setOperationAction(ISD::SDIV, VT, Expand);
152 setOperationAction(ISD::UDIV, VT, Expand);
153 setOperationAction(ISD::SREM, VT, Expand);
154 setOperationAction(ISD::UREM, VT, Expand);
155 setOperationAction(ISD::SDIVREM, VT, Custom);
156 setOperationAction(ISD::UDIVREM, VT, Custom);
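      // (On SystemZ the divide instructions, e.g. DSGR/DLGR, produce the
      //  remainder and the quotient together in an even/odd register pair,
      //  so a single combined DIVREM node maps naturally onto one division.)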
158 // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
159 // stores, putting a serialization instruction after the stores.
160 setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
161 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
163 // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
164 // available, or if the operand is constant.
165 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
167 // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);
173 // No special instructions for these.
174 setOperationAction(ISD::CTTZ, VT, Expand);
175 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
176 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
177 setOperationAction(ISD::ROTR, VT, Expand);
179 // Use *MUL_LOHI where possible instead of MULH*.
180 setOperationAction(ISD::MULHS, VT, Expand);
181 setOperationAction(ISD::MULHU, VT, Expand);
182 setOperationAction(ISD::SMUL_LOHI, VT, Custom);
183 setOperationAction(ISD::UMUL_LOHI, VT, Custom);
185 // Only z196 and above have native support for conversions to unsigned.
186 if (!Subtarget.hasFPExtension())
187 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
191 // Type legalization will convert 8- and 16-bit atomic operations into
192 // forms that operate on i32s (but still keeping the original memory VT).
193 // Lower them into full i32 operations.
194 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
195 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
196 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
197 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
198 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
199 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
200 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
201 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
202 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
203 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
204 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
205 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
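  // (The custom expanders later in this file turn the 8- and 16-bit forms
  //  into compare-and-swap loops on the containing aligned 32-bit word,
  //  which is why only the i32 versions need to be marked Custom here.)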
207 // z10 has instructions for signed but not unsigned FP conversion.
208 // Handle unsigned 32-bit types as signed 64-bit types.
209 if (!Subtarget.hasFPExtension()) {
210 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
211 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
214 // We have native support for a 64-bit CTLZ, via FLOGR.
215 setOperationAction(ISD::CTLZ, MVT::i32, Promote);
216 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
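  // (FLOGR operates on a 64-bit register; the promoted i32 form is widened
  //  to i64 and, roughly speaking, the generic legalizer then corrects the
  //  count for the extra 32 leading zero bits.)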
218 // Give LowerOperation the chance to replace 64-bit ORs with subregs.
219 setOperationAction(ISD::OR, MVT::i64, Custom);
221 // FIXME: Can we support these natively?
222 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
223 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
224 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
226 // We have native instructions for i8, i16 and i32 extensions, but not i1.
227 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
228 for (MVT VT : MVT::integer_valuetypes()) {
229 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
230 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
231 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
234 // Handle the various types of symbolic address.
235 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
236 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
237 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
238 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
239 setOperationAction(ISD::JumpTable, PtrVT, Custom);
241 // We need to handle dynamic allocations specially because of the
242 // 160-byte area at the bottom of the stack.
243 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
248 setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
250 // Handle prefetches with PFD or PFDRL.
251 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
253 // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
258 if (isTypeLegal(VT)) {
259 // We can use FI for FRINT.
260 setOperationAction(ISD::FRINT, VT, Legal);
262 // We can use the extended form of FI for other rounding operations.
263 if (Subtarget.hasFPExtension()) {
264 setOperationAction(ISD::FNEARBYINT, VT, Legal);
265 setOperationAction(ISD::FFLOOR, VT, Legal);
266 setOperationAction(ISD::FCEIL, VT, Legal);
267 setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
279 setOperationAction(ISD::FMA, MVT::f32, Legal);
280 setOperationAction(ISD::FMA, MVT::f64, Legal);
281 setOperationAction(ISD::FMA, MVT::f128, Expand);
283 // Needed so that we don't try to implement f128 constant loads using
284 // a load-and-extend of a f80 constant (in cases where the constant
285 // would fit in an f80).
286 for (MVT VT : MVT::fp_valuetypes())
287 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
289 // Floating-point truncation and stores need to be done separately.
290 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
291 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
292 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
297 setOperationAction(ISD::BITCAST, MVT::f32, Custom);
299 // VASTART and VACOPY need to deal with the SystemZ-specific varargs
300 // structure, but VAEND is a no-op.
301 setOperationAction(ISD::VASTART, MVT::Other, Custom);
302 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
303 setOperationAction(ISD::VAEND, MVT::Other, Expand);
305 // Codes for which we want to perform some z-specific combinations.
306 setTargetDAGCombine(ISD::SIGN_EXTEND);
308 // Handle intrinsics.
309 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
311 // We want to use MVC in preference to even a single load/store pair.
312 MaxStoresPerMemcpy = 0;
313 MaxStoresPerMemcpyOptSize = 0;
315 // The main memset sequence is a byte store followed by an MVC.
316 // Two STC or MV..I stores win over that, but the kind of fused stores
317 // generated by target-independent code don't when the byte value is
318 // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
319 // than "STC;MVC". Handle the choice in target-specific code instead.
320 MaxStoresPerMemset = 0;
321 MaxStoresPerMemsetOptSize = 0;
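  // (With both limits at zero, even tiny constant-length memcpy and memset
  //  calls are routed to the EmitTargetCodeForMemcpy/Memset hooks in
  //  SystemZSelectionDAGInfo rather than being expanded into stores here.)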
}

EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
330 bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}
349 bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
350 // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
351 return Imm.isZero() || Imm.isNegZero();
354 bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
355 // We can use CGFI or CLGFI.
356 return isInt<32>(Imm) || isUInt<32>(Imm);
359 bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
360 // We can use ALGFI or SLGFI.
361 return isUInt<32>(Imm) || isUInt<32>(-Imm);
bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}
bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}
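// For instance, an address of the form Base + Index + 0x7ffff passes the
// checks above (20-bit signed displacement, unscaled index), while
// Base + 4*Index does not; a scaled index has to be materialized with a
// separate shift.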
391 bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}
399 bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}
407 //===----------------------------------------------------------------------===//
408 // Inline asm support
409 //===----------------------------------------------------------------------===//
411 TargetLowering::ConstraintType
412 SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
413 if (Constraint.size() == 1) {
414 switch (Constraint[0]) {
415 case 'a': // Address register
416 case 'd': // Data register (equivalent to 'r')
417 case 'f': // Floating-point register
418 case 'h': // High-part register
419 case 'r': // General-purpose register
420 return C_RegisterClass;
422 case 'Q': // Memory with base and unsigned 12-bit displacement
423 case 'R': // Likewise, plus an index
424 case 'S': // Memory with base and signed 20-bit displacement
425 case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
443 TargetLowering::ConstraintWeight SystemZTargetLowering::
444 getSingleConstraintMatchWeight(AsmOperandInfo &info,
445 const char *constraint) const {
446 ConstraintWeight weight = CW_Invalid;
447 Value *CallOperandVal = info.CallOperandVal;
448 // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
453 // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
459 case 'a': // Address register
460 case 'd': // Data register (equivalent to 'r')
461 case 'h': // High-part register
462 case 'r': // General-purpose register
463 if (CallOperandVal->getType()->isIntegerTy())
464 weight = CW_Register;
467 case 'f': // Floating-point register
468 if (type->isFloatingPointTy())
469 weight = CW_Register;
472 case 'I': // Unsigned 8-bit constant
473 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
474 if (isUInt<8>(C->getZExtValue()))
475 weight = CW_Constant;
478 case 'J': // Unsigned 12-bit constant
479 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
480 if (isUInt<12>(C->getZExtValue()))
481 weight = CW_Constant;
484 case 'K': // Signed 16-bit constant
485 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
486 if (isInt<16>(C->getSExtValue()))
487 weight = CW_Constant;
490 case 'L': // Signed 20-bit displacement (on all targets we support)
491 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
492 if (isInt<20>(C->getSExtValue()))
493 weight = CW_Constant;
496 case 'M': // 0x7fffffff
497 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
498 if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}
505 // Parse a "{tNNN}" register constraint for which the register type "t"
506 // has already been verified. MC is the class associated with "t" and
507 // Map maps 0-based register numbers to LLVM register numbers.
508 static std::pair<unsigned, const TargetRegisterClass *>
509 parseRegisterNumber(const std::string &Constraint,
510 const TargetRegisterClass *RC, const unsigned *Map) {
511 assert(*(Constraint.end()-1) == '}' && "Missing '}'");
512 if (isdigit(Constraint[2])) {
513 std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
514 unsigned Index = atoi(Suffix.c_str());
515 if (Index < 16 && Map[Index])
516 return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}
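// For example, "{r5}" with a 64-bit operand resolves through
// SystemZMC::GR64Regs[5] to R5D, while "{f2}" with an f32 operand resolves
// through SystemZMC::FP32Regs[2] to F2S.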
521 std::pair<unsigned, const TargetRegisterClass *>
522 SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, const std::string &Constraint,
    MVT VT) const {
525 if (Constraint.size() == 1) {
526 // GCC Constraint Letters
    switch (Constraint[0]) {
    default:
      break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
533 else if (VT == MVT::i128)
534 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
535 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
540 else if (VT == MVT::i128)
541 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
542 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
544 case 'h': // High-part register (an LLVM extension)
545 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
550 else if (VT == MVT::f128)
551 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
556 // We need to override the default register parsing for GPRs and FPRs
557 // because the interpretation depends on VT. The internal names of
558 // the registers are also different from the external names
559 // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
584 void SystemZTargetLowering::
585 LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
586 std::vector<SDValue> &Ops,
587 SelectionDAG &DAG) const {
588 // Only support length 1 constraints for now.
589 if (Constraint.length() == 1) {
590 switch (Constraint[0]) {
591 case 'I': // Unsigned 8-bit constant
592 if (auto *C = dyn_cast<ConstantSDNode>(Op))
593 if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
599 if (auto *C = dyn_cast<ConstantSDNode>(Op))
600 if (isUInt<12>(C->getZExtValue()))
601 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
605 case 'K': // Signed 16-bit constant
606 if (auto *C = dyn_cast<ConstantSDNode>(Op))
607 if (isInt<16>(C->getSExtValue()))
608 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
612 case 'L': // Signed 20-bit displacement (on all targets we support)
613 if (auto *C = dyn_cast<ConstantSDNode>(Op))
614 if (isInt<20>(C->getSExtValue()))
615 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
619 case 'M': // 0x7fffffff
620 if (auto *C = dyn_cast<ConstantSDNode>(Op))
621 if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
630 //===----------------------------------------------------------------------===//
631 // Calling conventions
632 //===----------------------------------------------------------------------===//
634 #include "SystemZGenCallingConv.inc"
636 bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
637 Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}
647 // Value is a value that has been passed to us in the location described by VA
648 // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
649 // any loads onto Chain.
650 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
653 // If the argument has been promoted from a smaller type, insert an
654 // assertion to capture this.
655 if (VA.getLocInfo() == CCValAssign::SExt)
656 Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
657 DAG.getValueType(VA.getValVT()));
658 else if (VA.getLocInfo() == CCValAssign::ZExt)
659 Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
660 DAG.getValueType(VA.getValVT()));
  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
664 else if (VA.getLocInfo() == CCValAssign::Indirect)
665 Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}
672 // Value is a value of type VA.getValVT() that we need to copy into
673 // the location described by VA. Return a copy of Value converted to
674 // VA.getValVT(). The caller is responsible for handling indirect values.
675 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
676 CCValAssign &VA, SDValue Value) {
677 switch (VA.getLocInfo()) {
678 case CCValAssign::SExt:
679 return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
680 case CCValAssign::ZExt:
681 return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
682 case CCValAssign::AExt:
683 return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}
691 SDValue SystemZTargetLowering::
692 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
693 const SmallVectorImpl<ISD::InputArg> &Ins,
694 SDLoc DL, SelectionDAG &DAG,
695 SmallVectorImpl<SDValue> &InVals) const {
696 MachineFunction &MF = DAG.getMachineFunction();
697 MachineFrameInfo *MFI = MF.getFrameInfo();
698 MachineRegisterInfo &MRI = MF.getRegInfo();
699 SystemZMachineFunctionInfo *FuncInfo =
700 MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
704 // Assign locations to all of the incoming arguments.
705 SmallVector<CCValAssign, 16> ArgLocs;
706 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
707 CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
709 unsigned NumFixedGPRs = 0;
710 unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
717 const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }
740 unsigned VReg = MRI.createVirtualRegister(RC);
741 MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
746 // Create the frame index object for this incoming parameter.
747 int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
748 VA.getLocMemOffset(), true);
750 // Create the SelectionDAG nodes corresponding to a load
751 // from this parameter. Unpromoted ints and floats are
752 // passed as right-justified 8-byte values.
753 EVT PtrVT = getPointerTy();
754 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
755 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
756 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
757 DAG.getIntPtrConstant(4, DL));
758 ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
759 MachinePointerInfo::getFixedStack(FI),
760 false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
770 FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
771 FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
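    // (In the ELF ABI the first five integer arguments travel in R2 through
    //  R6 and the first four FP arguments in F0, F2, F4 and F6; later
    //  arguments and all varargs live in the caller's frame.)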
773 // Likewise the address (in the form of a frame index) of where the
774 // first stack vararg would be. The 1-byte size here is arbitrary.
775 int64_t StackSize = CCInfo.getNextStackOffset();
776 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));
778 // ...and a similar frame index for the caller-allocated save area
779 // that will be used to store the incoming registers.
780 int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
781 unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
782 FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
784 // Store the FPR varargs in the reserved frame slots. (We store the
785 // GPRs as part of the prologue.)
786 if (NumFixedFPRs < SystemZ::NumArgFPRs) {
787 SDValue MemOps[SystemZ::NumArgFPRs];
788 for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
789 unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
790 int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
791 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
792 unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
793 &SystemZ::FP64BitRegClass);
794 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
795 MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}
810 static bool canUseSiblingCall(const CCState &ArgCCInfo,
811 SmallVectorImpl<CCValAssign> &ArgLocs) {
812 // Punt if there are any indirect or stack arguments, or if the call
813 // needs the call-saved argument register R6.
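  // (R6 is the last GPR argument register and is also call-saved in the
  //  ELF ABI, so a sibling call could not both pass a value in it and
  //  leave the caller's copy intact.)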
814 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
815 CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
829 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
832 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
833 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
834 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
835 SDValue Chain = CLI.Chain;
836 SDValue Callee = CLI.Callee;
837 bool &IsTailCall = CLI.IsTailCall;
838 CallingConv::ID CallConv = CLI.CallConv;
839 bool IsVarArg = CLI.IsVarArg;
840 MachineFunction &MF = DAG.getMachineFunction();
841 EVT PtrVT = getPointerTy();
843 // Analyze the operands of the call, assigning locations to each operand.
844 SmallVector<CCValAssign, 16> ArgLocs;
845 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
846 ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;
853 // Get a count of how many bytes are to be pushed on the stack.
854 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, DL, PtrVT, true),
                                 DL);
862 // Copy argument values to their designated locations.
863 SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
866 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
867 CCValAssign &VA = ArgLocs[I];
868 SDValue ArgValue = OutVals[I];
870 if (VA.getLocInfo() == CCValAssign::Indirect) {
871 // Store the argument in a stack slot and pass its address.
872 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
873 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
874 MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");
887 // Work out the address of the stack slot. Unpromoted ints and
888 // floats are passed as right-justified 8-byte values.
889 if (!StackPtr.getNode())
890 StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
891 unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }
904 // Join the stores, which are independent of one another.
905 if (!MemOpChains.empty())
906 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
908 // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
913 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
914 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
915 } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
916 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
917 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
918 } else if (IsTailCall) {
919 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
920 Glue = Chain.getValue(1);
921 Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
924 // Build a sequence of copy-to-reg nodes, chained and glued together.
925 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
926 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
927 RegsToPass[I].second, Glue);
928 Glue = Chain.getValue(1);
931 // The first call operand is the chain and the second is the target address.
932 SmallVector<SDValue, 8> Ops;
933 Ops.push_back(Chain);
934 Ops.push_back(Callee);
936 // Add argument registers to the end of the list so that they are
937 // known live into the call.
938 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
939 Ops.push_back(DAG.getRegister(RegsToPass[I].first,
940 RegsToPass[I].second.getValueType()));
942 // Add a register mask operand representing the call-preserved registers.
943 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
944 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
945 assert(Mask && "Missing call preserved mask for calling convention");
946 Ops.push_back(DAG.getRegisterMask(Mask));
  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
957 Glue = Chain.getValue(1);
959 // Mark the end of the call, which is glued to the call itself.
960 Chain = DAG.getCALLSEQ_END(Chain,
961 DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);
966 // Assign locations to each value returned by this call.
967 SmallVector<CCValAssign, 16> RetLocs;
968 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
969 RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
971 // Copy all of the result registers out of their specified physreg.
972 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
973 CCValAssign &VA = RetLocs[I];
975 // Copy the value out, gluing the copy to the end of the call sequence.
976 SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
977 VA.getLocVT(), Glue);
978 Chain = RetValue.getValue(1);
979 Glue = RetValue.getValue(2);
    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
991 CallingConv::ID CallConv, bool IsVarArg,
992 const SmallVectorImpl<ISD::OutputArg> &Outs,
993 const SmallVectorImpl<SDValue> &OutVals,
994 SDLoc DL, SelectionDAG &DAG) const {
995 MachineFunction &MF = DAG.getMachineFunction();
997 // Assign locations to each returned value.
998 SmallVector<CCValAssign, 16> RetLocs;
999 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1000 RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
1002 // Quick exit for void returns
1003 if (RetLocs.empty())
1004 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
1006 // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
1009 RetOps.push_back(Chain);
1010 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1011 CCValAssign &VA = RetLocs[I];
1012 SDValue RetValue = OutVals[I];
1014 // Make the return register live on exit.
1015 assert(VA.isRegLoc() && "Can only return in registers!");
1017 // Promote the value as required.
1018 RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
1020 // Chain and glue the copies together.
1021 unsigned Reg = VA.getLocReg();
1022 Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
1023 Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);
1032 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
1035 SDValue SystemZTargetLowering::
1036 prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
1037 return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
1040 // Return true if Op is an intrinsic node with chain that returns the CC value
1041 // as its only (other) argument. Provide the associated SystemZISD opcode and
1042 // the mask of valid CC values if so.
1043 static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
1044 unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}
1067 // Emit an intrinsic with chain with a glued value instead of its CC result.
static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op,
                                             unsigned Opcode) {
1070 // Copy all operands except the intrinsic ID.
1071 unsigned NumOps = Op.getNumOperands();
1072 SmallVector<SDValue, 6> Ops;
1073 Ops.reserve(NumOps - 1);
1074 Ops.push_back(Op.getOperand(0));
1075 for (unsigned I = 2; I < NumOps; ++I)
1076 Ops.push_back(Op.getOperand(I));
1078 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
1079 SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1080 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
1081 SDValue OldChain = SDValue(Op.getNode(), 1);
1082 SDValue NewChain = SDValue(Intr.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr;
}
1087 // CC is a comparison that will be implemented using an integer or
1088 // floating-point comparison. Return the condition code mask for
1089 // a branch on true. In the integer case, CCMASK_CMP_UO is set for
1090 // unsigned comparisons and clear for signed ones. In the floating-point
1091 // case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(LT);
  CONV(GT);
  CONV(LE);
  CONV(GE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
1115 // Return a sequence for getting a 1 from an IPM result when CC has a
1116 // value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
1117 // The handling of CC values outside CCValid doesn't matter.
1118 static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
1119 // Deal with cases where the result can be taken directly from a bit
1120 // of the IPM result.
1121 if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
1122 return IPMConversion(0, 0, SystemZ::IPM_CC);
1123 if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
1124 return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
1126 // Deal with cases where we can add a value to force the sign bit
1127 // to contain the right value. Putting the bit in 31 means we can
1128 // use SRL rather than RISBG(L), and also makes it easier to get a
1129 // 0/-1 value, so it has priority over the other tests below.
1131 // These sequences rely on the fact that the upper two bits of the
1132 // IPM result are zero.
1133 uint64_t TopBit = uint64_t(1) << 31;
1134 if (CCMask == (CCValid & SystemZ::CCMASK_0))
1135 return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
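  // A worked example of these addition-based cases: for CCMask == CCMASK_0
  // the CC field sits at bit SystemZ::IPM_CC with the two bits above it
  // known to be zero, so adding -(1 << SystemZ::IPM_CC) makes the 32-bit
  // value negative exactly when CC is 0, and the logical shift right by 31
  // then yields the required 0/1 result.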
1136 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
1137 return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
1141 return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
1142 if (CCMask == (CCValid & SystemZ::CCMASK_3))
1143 return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
1147 return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
1149 // Next try inverting the value and testing a bit. 0/1 could be
1150 // handled this way too, but we dealt with that case above.
1151 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
1152 return IPMConversion(-1, 0, SystemZ::IPM_CC);
  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
1156 if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
1157 return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
1158 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
1159 return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can
  // be done by inverting the low CC bit and applying one of the
1163 // sign-based extractions above.
1164 if (CCMask == (CCValid & SystemZ::CCMASK_1))
1165 return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
1166 if (CCMask == (CCValid & SystemZ::CCMASK_2))
1167 return IPMConversion(1 << SystemZ::IPM_CC,
1168 TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
1172 return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
1176 return IPMConversion(1 << SystemZ::IPM_CC,
1177 TopBit - (1 << SystemZ::IPM_CC), 31);
  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
1193 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1194 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1195 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1196 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1197 C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
1198 C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
1202 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1203 // adjust the operands as necessary.
1204 static void adjustSubwordCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
1205 // For us to make any changes, it must a comparison between a single-use
1206 // load and a constant.
1207 if (!C.Op0.hasOneUse() ||
1208 C.Op0.getOpcode() != ISD::LOAD ||
1209 C.Op1.getOpcode() != ISD::Constant)
1212 // We must have an 8- or 16-bit load.
1213 auto *Load = cast<LoadSDNode>(C.Op0);
1214 unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1215 if (NumBits != 8 && NumBits != 16)
1218 // The load must be an extending one and the constant must be within the
1219 // range of the unextended value.
1220 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1221 uint64_t Value = ConstOp1->getZExtValue();
1222 uint64_t Mask = (1 << NumBits) - 1;
1223 if (Load->getExtensionType() == ISD::SEXTLOAD) {
1224 // Make sure that ConstOp1 is in range of C.Op0.
1225 int64_t SignedValue = ConstOp1->getSExtValue();
1226 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1228 if (C.ICmpType != SystemZICMP::SignedOnly) {
1229 // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
1233 // Try to treat the comparison as unsigned, so that we can use CLI.
1234 // Adjust CCMask and Value as necessary.
1235 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1236 // Test whether the high bit of the byte is set.
1237 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1238 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1239 // Test whether the high bit of the byte is clear.
1240 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    assert(C.ICmpType == SystemZICMP::Any &&
           "Signedness shouldn't matter here.");
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
1259 Load->getExtensionType() != ExtType)
1260 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
1261 Load->getChain(), Load->getBasePtr(),
1262 Load->getPointerInfo(), Load->getMemoryVT(),
1263 Load->isVolatile(), Load->isNonTemporal(),
1264 Load->isInvariant(), Load->getAlignment());
1266 // Make sure that the second operand is an i32 with the right value.
1267 if (C.Op1.getValueType() != MVT::i32 ||
1268 Value != ConstOp1->getZExtValue())
1269 C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
1272 // Return true if Op is either an unextended load, or a load suitable
1273 // for integer register-memory comparisons of type ICmpType.
1274 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}
1295 // Return true if it is better to swap the operands of C.
1296 static bool shouldSwapCmpOperands(const Comparison &C) {
1297 // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;
1301 // Always keep a floating-point constant second, since comparisons with
1302 // zero can use LOAD TEST and comparisons with other constants make a
1303 // natural memory operand.
1304 if (isa<ConstantFPSDNode>(C.Op1))
  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;
1313 // Also keep natural memory operands second if the loaded value is
1314 // only used here. Several comparisons have memory forms.
1315 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
1318 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
1319 // In that case we generally prefer the memory to be second.
1320 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
1321 // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
1326 // unsigned integers.
1327 if (C.ICmpType != SystemZICMP::SignedOnly &&
1328 isUInt<16>(ConstOp1->getZExtValue()))
1330 // The signed memory-immediate instructions can handle 16-bit
1332 if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
1339 unsigned Opcode0 = C.Op0.getOpcode();
1340 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
1342 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
1344 if (C.ICmpType != SystemZICMP::SignedOnly &&
1345 Opcode0 == ISD::AND &&
1346 C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
1354 // actions are swapped.
1355 static unsigned reverseCCMask(unsigned CCMask) {
1356 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1357 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1358 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
1359 (CCMask & SystemZ::CCMASK_CMP_UO));
1362 // Check whether C tests for equality between X and Y and whether X - Y
1363 // or Y - X is also computed. In that case it's better to compare the
1364 // result of the subtraction against zero.
1365 static void adjustForSubtraction(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
1366 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
1367 C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
1371 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
1372 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
1373 C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and if that
1382 // floating-point value is also negated. In this case we can use the
1383 // negation to set CC, so avoiding separate LOAD AND TEST and
1384 // LOAD (NEGATIVE/COMPLEMENT) instructions.
1385 static void adjustForFNeg(Comparison &C) {
1386 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
1387 if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
1391 C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
1400 // also sign-extended. In that case it is better to test the result
1401 // of the sign extension using LTGFR.
1403 // This case is important because InstCombine transforms a comparison
1404 // with (sext (trunc X)) into a comparison with (shl X, 32).
1405 static void adjustForLTGFR(Comparison &C) {
1406 // Check for a comparison between (shl X, 32) and 0.
1407 if (C.Op0.getOpcode() == ISD::SHL &&
1408 C.Op0.getValueType() == MVT::i64 &&
1409 C.Op1.getOpcode() == ISD::Constant &&
1410 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1411 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
1412 if (C1 && C1->getZExtValue() == 32) {
1413 SDValue ShlOp0 = C.Op0.getOperand(0);
1414 // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
1418 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
1428 // the untruncated value instead. This exposes more opportunities to
1430 static void adjustICmpTruncate(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
1431 if (C.Op0.getOpcode() == ISD::TRUNCATE &&
1432 C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
1433 C.Op1.getOpcode() == ISD::Constant &&
1434 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1435 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
1436 if (L->getMemoryVT().getStoreSizeInBits()
1437 <= C.Op0.getValueType().getSizeInBits()) {
1438 unsigned Type = L->getExtensionType();
1439 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
1440 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
1441 C.Op0 = C.Op0.getOperand(0);
1442 C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
1448 // Return true if shift operation N has an in-range constant shift value.
1449 // Store it in ShiftVal if so.
1450 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}
1463 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
1464 // instruction and whether the CC value is descriptive enough to handle
1465 // a comparison of type Opcode between the AND result and CmpVal.
1466 // CCMask says which comparison result is being tested and BitSize is
1467 // the number of bits in the operands. If TEST UNDER MASK can be used,
1468 // return the corresponding CC mask, otherwise return 0.
1469 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
1470 uint64_t Mask, uint64_t CmpVal,
1471 unsigned ICmpType) {
1472 assert(Mask != 0 && "ANDs with zero should have been removed by now");
1474 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
1475 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
1476 !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
1479 // Work out the masks for the lowest and highest bits.
1480 unsigned HighShift = 63 - countLeadingZeros(Mask);
1481 uint64_t High = uint64_t(1) << HighShift;
1482 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
1484 // Signed ordered comparisons are effectively unsigned if the sign
1486 bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
1491 return SystemZ::CCMASK_TM_ALL_0;
1492 if (CCMask == SystemZ::CCMASK_CMP_NE)
1493 return SystemZ::CCMASK_TM_SOME_1;
1495 if (EffectivelyUnsigned && CmpVal <= Low) {
1496 if (CCMask == SystemZ::CCMASK_CMP_LT)
1497 return SystemZ::CCMASK_TM_ALL_0;
1498 if (CCMask == SystemZ::CCMASK_CMP_GE)
1499 return SystemZ::CCMASK_TM_SOME_1;
1501 if (EffectivelyUnsigned && CmpVal < Low) {
1502 if (CCMask == SystemZ::CCMASK_CMP_LE)
1503 return SystemZ::CCMASK_TM_ALL_0;
1504 if (CCMask == SystemZ::CCMASK_CMP_GT)
1505 return SystemZ::CCMASK_TM_SOME_1;
1508 // Check for equality comparisons with the mask, or the equivalent.
1509 if (CmpVal == Mask) {
1510 if (CCMask == SystemZ::CCMASK_CMP_EQ)
1511 return SystemZ::CCMASK_TM_ALL_1;
1512 if (CCMask == SystemZ::CCMASK_CMP_NE)
1513 return SystemZ::CCMASK_TM_SOME_0;
1515 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
1516 if (CCMask == SystemZ::CCMASK_CMP_GT)
1517 return SystemZ::CCMASK_TM_ALL_1;
1518 if (CCMask == SystemZ::CCMASK_CMP_LE)
1519 return SystemZ::CCMASK_TM_SOME_0;
1521 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
1522 if (CCMask == SystemZ::CCMASK_CMP_GE)
1523 return SystemZ::CCMASK_TM_ALL_1;
1524 if (CCMask == SystemZ::CCMASK_CMP_LT)
1525 return SystemZ::CCMASK_TM_SOME_0;
1528 // Check for ordered comparisons with the top bit.
1529 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
1530 if (CCMask == SystemZ::CCMASK_CMP_LE)
1531 return SystemZ::CCMASK_TM_MSB_0;
1532 if (CCMask == SystemZ::CCMASK_CMP_GT)
1533 return SystemZ::CCMASK_TM_MSB_1;
1535 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
1536 if (CCMask == SystemZ::CCMASK_CMP_LT)
1537 return SystemZ::CCMASK_TM_MSB_0;
1538 if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
1545 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
1546 return SystemZ::CCMASK_TM_MIXED_MSB_0;
1547 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
1548 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
1549 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
1550 return SystemZ::CCMASK_TM_MIXED_MSB_1;
1551 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
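// As an illustration of the mapping above: "(X & 0x8000) == 0x8000" has
// CmpVal == Mask, so the code returns CCMASK_TM_ALL_1 and the comparison
// can typically be emitted as a single TEST UNDER MASK (TMLL here) plus a
// branch on that CC mask, instead of an AND followed by a separate compare.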
1559 // See whether C can be implemented as a TEST UNDER MASK instruction.
1560 // Update the arguments with the TM version if so.
1561 static void adjustForTestUnderMask(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
1562 // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();
  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
1572 if (C.Op0.getOpcode() == ISD::AND) {
1573 NewC.Op0 = C.Op0.getOperand(0);
1574 NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
1581 // so use TMHH instead if possible. We need an unsigned ordered
1582 // comparison with an i64 immediate.
1583 if (NewC.Op0.getValueType() != MVT::i64 ||
1584 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
1585 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
1589 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
1590 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
1597 // be masked off without changing the result.
1598 MaskVal = -(CmpVal & -CmpVal);
1599 NewC.ICmpType = SystemZICMP::UnsignedOnly;
1604 // Check whether the combination of mask, comparison value and comparison
1605 // type are suitable.
1606 unsigned BitSize = NewC.Op0.getValueType().getSizeInBits();
1607 unsigned NewCCMask, ShiftVal;
1608 if (NewC.ICmpType != SystemZICMP::SignedOnly &&
1609 NewC.Op0.getOpcode() == ISD::SHL &&
1610 isSimpleShift(NewC.Op0, ShiftVal) &&
1611 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
1615 NewC.Op0 = NewC.Op0.getOperand(0);
1616 MaskVal >>= ShiftVal;
1617 } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
1618 NewC.Op0.getOpcode() == ISD::SRL &&
1619 isSimpleShift(NewC.Op0, ShiftVal) &&
1620 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
1624 NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
1637 C.Op1 = SDValue(Mask, 0);
1639 C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
1640 C.CCValid = SystemZ::CCMASK_TM;
1641 C.CCMask = NewCCMask;
1644 // Return a Comparison that tests the condition-code result of intrinsic
1645 // node Call against constant integer CC using comparison code Cond.
1646 // Opcode is the opcode of the SystemZISD operation for the intrinsic
1647 // and CCValid is the set of possible condition-code results.
1648 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
1649 SDValue Call, unsigned CCValid, uint64_t CC,
1650 ISD::CondCode Cond) {
1651 Comparison C(Call, SDValue());
1653 C.CCValid = CCValid;
1654 if (Cond == ISD::SETEQ)
1655 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
1656 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
1657 else if (Cond == ISD::SETNE)
1658 // ...and the inverse of that.
1659 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
1660 else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
1661 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
1662 // always true for CC>3.
1663 C.CCMask = CC < 4 ? -1 << (4 - CC) : -1;
1664 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
1665 // ...and the inverse of that.
1666 C.CCMask = CC < 4 ? ~(-1 << (4 - CC)) : 0;
1667 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
1668 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
1669 // always true for CC>3.
1670 C.CCMask = CC < 4 ? -1 << (3 - CC) : -1;
1671 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
1672 // ...and the inverse of that.
1673 C.CCMask = CC < 4 ? ~(-1 << (3 - CC)) : 0;
1675 llvm_unreachable("Unexpected integer comparison type");
1676 C.CCMask &= CCValid;
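// For example (illustrative): Cond == SETLT with CC == 2 requires the produced
// condition code to be 0 or 1, i.e. bits 3 and 2 of the mask, so the
// computation above yields CCMask == 0xc before masking with CCValid.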
1680 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
1681 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
1682 ISD::CondCode Cond, SDLoc DL) {
1683 if (CmpOp1.getOpcode() == ISD::Constant) {
1684 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
1685 unsigned Opcode, CCValid;
1686 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
1687 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
1688 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
1689 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
1691 Comparison C(CmpOp0, CmpOp1);
1692 C.CCMask = CCMaskForCondCode(Cond);
1693 if (C.Op0.getValueType().isFloatingPoint()) {
1694 C.CCValid = SystemZ::CCMASK_FCMP;
1695 C.Opcode = SystemZISD::FCMP;
1698 C.CCValid = SystemZ::CCMASK_ICMP;
1699 C.Opcode = SystemZISD::ICMP;
1700 // Choose the type of comparison. Equality and inequality tests can
1701 // use either signed or unsigned comparisons. The choice also doesn't
1702 // matter if both sign bits are known to be clear. In those cases we
1703 // want to give the main isel code the freedom to choose whichever form fits best.
1705 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
1706 C.CCMask == SystemZ::CCMASK_CMP_NE ||
1707 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
1708 C.ICmpType = SystemZICMP::Any;
1709 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
1710 C.ICmpType = SystemZICMP::UnsignedOnly;
1712 C.ICmpType = SystemZICMP::SignedOnly;
1713 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
1714 adjustZeroCmp(DAG, DL, C);
1715 adjustSubwordCmp(DAG, DL, C);
1716 adjustForSubtraction(DAG, DL, C);
1718 adjustICmpTruncate(DAG, DL, C);
1721 if (shouldSwapCmpOperands(C)) {
1722 std::swap(C.Op0, C.Op1);
1723 C.CCMask = reverseCCMask(C.CCMask);
1726 adjustForTestUnderMask(DAG, DL, C);
1730 // Emit the comparison instruction described by C.
1731 static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
1732 if (!C.Op1.getNode()) {
1734 switch (C.Op0.getOpcode()) {
1735 case ISD::INTRINSIC_W_CHAIN:
1736 Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode);
1739 llvm_unreachable("Invalid comparison operands");
1741 return SDValue(Op.getNode(), Op->getNumValues() - 1);
1743 if (C.Opcode == SystemZISD::ICMP)
1744 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
1745 DAG.getConstant(C.ICmpType, DL, MVT::i32));
1746 if (C.Opcode == SystemZISD::TM) {
1747 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
1748 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
1749 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
1750 DAG.getConstant(RegisterOnly, DL, MVT::i32));
1752 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
1755 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
1756 // 64 bits. Extend is the extension type to use. Store the high part
1757 // in Hi and the low part in Lo.
1758 static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
1759 unsigned Extend, SDValue Op0, SDValue Op1,
1760 SDValue &Hi, SDValue &Lo) {
1761 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
1762 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
1763 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
1764 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
1765 DAG.getConstant(32, DL, MVT::i64));
1766 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
1767 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
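// For example (illustrative): a UMUL_LOHI32 of 0x80000000 and 4 produces the
// 64-bit product 0x200000000, so Hi becomes 2 and Lo becomes 0.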
1770 // Lower a binary operation that produces two VT results, one in each
1771 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
1772 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
1773 // on the extended Op0 and (unextended) Op1. Store the even register result
1774 // in Even and the odd register result in Odd.
1775 static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
1776 unsigned Extend, unsigned Opcode,
1777 SDValue Op0, SDValue Op1,
1778 SDValue &Even, SDValue &Odd) {
1779 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
1780 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
1781 SDValue(In128, 0), Op1);
1782 bool Is32Bit = is32Bit(VT);
1783 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
1784 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
1787 // Return an i32 value that is 1 if the CC value produced by Glue is
1788 // in the mask CCMask and 0 otherwise. CC is known to have a value
1789 // in CCValid, so other values can be ignored.
1790 static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue,
1791 unsigned CCValid, unsigned CCMask) {
1792 IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
1793 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
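// IPM places the condition code in bits 29:28 of the result (SystemZ::IPM_CC
// is 28), so the XOR/ADD/shift sequence below arranges for a single bit of the
// adjusted value to carry the 0/1 answer.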
1795 if (Conversion.XORValue)
1796 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
1797 DAG.getConstant(Conversion.XORValue, DL, MVT::i32));
1799 if (Conversion.AddValue)
1800 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
1801 DAG.getConstant(Conversion.AddValue, DL, MVT::i32));
1803 // The SHR/AND sequence should get optimized to an RISBG.
1804 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
1805 DAG.getConstant(Conversion.Bit, DL, MVT::i32));
1806 if (Conversion.Bit != 31)
1807 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
1808 DAG.getConstant(1, DL, MVT::i32));
1812 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
1813 SelectionDAG &DAG) const {
1814 SDValue CmpOp0 = Op.getOperand(0);
1815 SDValue CmpOp1 = Op.getOperand(1);
1816 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1819 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
1820 SDValue Glue = emitCmp(DAG, DL, C);
1821 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
1824 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1825 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1826 SDValue CmpOp0 = Op.getOperand(2);
1827 SDValue CmpOp1 = Op.getOperand(3);
1828 SDValue Dest = Op.getOperand(4);
1831 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
1832 SDValue Glue = emitCmp(DAG, DL, C);
1833 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
1834 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
1835 DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue);
1838 // Return true if Pos is CmpOp and Neg is the negative of CmpOp,
1839 // allowing Pos and Neg to be wider than CmpOp.
1840 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
1841 return (Neg.getOpcode() == ISD::SUB &&
1842 Neg.getOperand(0).getOpcode() == ISD::Constant &&
1843 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
1844 Neg.getOperand(1) == Pos &&
1846 (Pos.getOpcode() == ISD::SIGN_EXTEND &&
1847 Pos.getOperand(0) == CmpOp)));
1850 // Return the absolute or negative absolute of Op; IsNegative decides which.
1851 static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op,
1853 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
1855 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
1856 DAG.getConstant(0, DL, Op.getValueType()), Op);
1860 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
1861 SelectionDAG &DAG) const {
1862 SDValue CmpOp0 = Op.getOperand(0);
1863 SDValue CmpOp1 = Op.getOperand(1);
1864 SDValue TrueOp = Op.getOperand(2);
1865 SDValue FalseOp = Op.getOperand(3);
1866 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1869 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
1871 // Check for absolute and negative-absolute selections, including those
1872 // where the comparison value is sign-extended (for LPGFR and LNGFR).
1873 // This check supplements the one in DAGCombiner.
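// For example (illustrative): "(X < 0) ? -X : X" is recognized here as |X| and
// becomes a single SystemZISD::IABS node (a LOAD POSITIVE instruction) rather
// than a compare plus conditional select.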
1874 if (C.Opcode == SystemZISD::ICMP &&
1875 C.CCMask != SystemZ::CCMASK_CMP_EQ &&
1876 C.CCMask != SystemZ::CCMASK_CMP_NE &&
1877 C.Op1.getOpcode() == ISD::Constant &&
1878 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1879 if (isAbsolute(C.Op0, TrueOp, FalseOp))
1880 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
1881 if (isAbsolute(C.Op0, FalseOp, TrueOp))
1882 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
1885 SDValue Glue = emitCmp(DAG, DL, C);
1887 // Special case for handling -1/0 results. The shifts we use here
1888 // should get optimized with the IPM conversion sequence.
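// For example (illustrative): for an i32 result, the 0/1 SETCC value is shifted
// left by 31 and then arithmetically shifted right by 31, turning 1 into -1
// and leaving 0 unchanged.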
1889 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
1890 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
1891 if (TrueC && FalseC) {
1892 int64_t TrueVal = TrueC->getSExtValue();
1893 int64_t FalseVal = FalseC->getSExtValue();
1894 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
1895 // Invert the condition if we want -1 on false.
1897 C.CCMask ^= C.CCValid;
1898 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
1899 EVT VT = Op.getValueType();
1900 // Extend the result to VT. Upper bits are ignored.
1902 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
1903 // Sign-extend from the low bit.
1904 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32);
1905 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
1906 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
1910 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
1911 DAG.getConstant(C.CCMask, DL, MVT::i32), Glue};
1913 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
1914 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
1917 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
1918 SelectionDAG &DAG) const {
1920 const GlobalValue *GV = Node->getGlobal();
1921 int64_t Offset = Node->getOffset();
1922 EVT PtrVT = getPointerTy();
1923 Reloc::Model RM = DAG.getTarget().getRelocationModel();
1924 CodeModel::Model CM = DAG.getTarget().getCodeModel();
1927 if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
1928 // Assign anchors at 1<<12 byte boundaries.
1929 uint64_t Anchor = Offset & ~uint64_t(0xfff);
1930 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
1931 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1933 // The offset can be folded into the address if it is aligned to a halfword.
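// For example (illustrative): with Offset == 0x1234, Anchor is 0x1000 and the
// remaining 0x234 is even, so it can be applied by the PCREL_OFFSET node below.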
1935 if (Offset != 0 && (Offset & 1) == 0) {
1936 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
1937 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
1941 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
1942 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1943 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
1944 MachinePointerInfo::getGOT(), false, false, false, 0);
1947 // If there was a non-zero offset that we didn't fold, create an explicit addition for it.
1950 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
1951 DAG.getConstant(Offset, DL, PtrVT));
1956 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
1959 SDValue GOTOffset) const {
1961 EVT PtrVT = getPointerTy();
1962 SDValue Chain = DAG.getEntryNode();
1965 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
1966 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1967 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
1968 Glue = Chain.getValue(1);
1969 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
1970 Glue = Chain.getValue(1);
1972 // The first call operand is the chain and the second is the TLS symbol.
1973 SmallVector<SDValue, 8> Ops;
1974 Ops.push_back(Chain);
1975 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
1976 Node->getValueType(0),
1979 // Add argument registers to the end of the list so that they are
1980 // known live into the call.
1981 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
1982 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
1984 // Add a register mask operand representing the call-preserved registers.
1985 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1986 const uint32_t *Mask =
1987 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
1988 assert(Mask && "Missing call preserved mask for calling convention");
1989 Ops.push_back(DAG.getRegisterMask(Mask));
1991 // Glue the call to the argument copies.
1992 Ops.push_back(Glue);
1995 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1996 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
1997 Glue = Chain.getValue(1);
1999 // Copy the return value from %r2.
2000 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
2003 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
2004 SelectionDAG &DAG) const {
2006 const GlobalValue *GV = Node->getGlobal();
2007 EVT PtrVT = getPointerTy();
2008 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
2010 // The high part of the thread pointer is in access register 0.
2011 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
2012 DAG.getConstant(0, DL, MVT::i32));
2013 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
2015 // The low part of the thread pointer is in access register 1.
2016 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
2017 DAG.getConstant(1, DL, MVT::i32));
2018 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
2020 // Merge them into a single 64-bit address.
2021 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
2022 DAG.getConstant(32, DL, PtrVT));
2023 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
2025 // Get the offset of GV from the thread pointer, based on the TLS model.
2028 case TLSModel::GeneralDynamic: {
2029 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2030 SystemZConstantPoolValue *CPV =
2031 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
2033 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2034 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
2035 Offset, MachinePointerInfo::getConstantPool(),
2036 false, false, false, 0);
2038 // Call __tls_get_offset to retrieve the offset.
2039 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
2043 case TLSModel::LocalDynamic: {
2044 // Load the GOT offset of the module ID.
2045 SystemZConstantPoolValue *CPV =
2046 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
2048 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2049 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
2050 Offset, MachinePointerInfo::getConstantPool(),
2051 false, false, false, 0);
2053 // Call __tls_get_offset to retrieve the module base offset.
2054 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
2056 // Note: The SystemZLDCleanupPass will remove redundant computations
2057 // of the module base offset. Count total number of local-dynamic
2058 // accesses to trigger execution of that pass.
2059 SystemZMachineFunctionInfo* MFI =
2060 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
2061 MFI->incNumLocalDynamicTLSAccesses();
2063 // Add the per-symbol offset.
2064 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
2066 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
2067 DTPOffset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
2068 DTPOffset, MachinePointerInfo::getConstantPool(),
2069 false, false, false, 0);
2071 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
2075 case TLSModel::InitialExec: {
2076 // Load the offset from the GOT.
2077 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2078 SystemZII::MO_INDNTPOFF);
2079 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
2080 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
2081 Offset, MachinePointerInfo::getGOT(),
2082 false, false, false, 0);
2086 case TLSModel::LocalExec: {
2087 // Force the offset into the constant pool and load it from there.
2088 SystemZConstantPoolValue *CPV =
2089 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
2091 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2092 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
2093 Offset, MachinePointerInfo::getConstantPool(),
2094 false, false, false, 0);
2099 // Add the base and offset together.
2100 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
2103 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
2104 SelectionDAG &DAG) const {
2106 const BlockAddress *BA = Node->getBlockAddress();
2107 int64_t Offset = Node->getOffset();
2108 EVT PtrVT = getPointerTy();
2110 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
2111 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2115 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
2116 SelectionDAG &DAG) const {
2118 EVT PtrVT = getPointerTy();
2119 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2121 // Use LARL to load the address of the table.
2122 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2125 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
2126 SelectionDAG &DAG) const {
2128 EVT PtrVT = getPointerTy();
2131 if (CP->isMachineConstantPoolEntry())
2132 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2133 CP->getAlignment());
2135 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2136 CP->getAlignment(), CP->getOffset());
2138 // Use LARL to load the address of the constant pool entry.
2139 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2142 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
2143 SelectionDAG &DAG) const {
2145 SDValue In = Op.getOperand(0);
2146 EVT InVT = In.getValueType();
2147 EVT ResVT = Op.getValueType();
2149 if (InVT == MVT::i32 && ResVT == MVT::f32) {
2151 if (Subtarget.hasHighWord()) {
2152 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
2154 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
2155 MVT::i64, SDValue(U64, 0), In);
2157 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
2158 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
2159 DAG.getConstant(32, DL, MVT::i64));
2161 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
2162 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
2163 DL, MVT::f32, Out64);
2165 if (InVT == MVT::f32 && ResVT == MVT::i32) {
2166 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
2167 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
2168 MVT::f64, SDValue(U64, 0), In);
2169 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
2170 if (Subtarget.hasHighWord())
2171 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
2173 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
2174 DAG.getConstant(32, DL, MVT::i64));
2175 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
2177 llvm_unreachable("Unexpected bitcast combination");
2180 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
2181 SelectionDAG &DAG) const {
2182 MachineFunction &MF = DAG.getMachineFunction();
2183 SystemZMachineFunctionInfo *FuncInfo =
2184 MF.getInfo<SystemZMachineFunctionInfo>();
2185 EVT PtrVT = getPointerTy();
2187 SDValue Chain = Op.getOperand(0);
2188 SDValue Addr = Op.getOperand(1);
2189 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2192 // The initial values of each field.
2193 const unsigned NumFields = 4;
2194 SDValue Fields[NumFields] = {
2195 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
2196 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
2197 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
2198 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
2201 // Store each field into its respective slot.
2202 SDValue MemOps[NumFields];
2203 unsigned Offset = 0;
2204 for (unsigned I = 0; I < NumFields; ++I) {
2205 SDValue FieldAddr = Addr;
2207 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
2208 DAG.getIntPtrConstant(Offset, DL));
2209 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
2210 MachinePointerInfo(SV, Offset),
2214 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
2217 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
2218 SelectionDAG &DAG) const {
2219 SDValue Chain = Op.getOperand(0);
2220 SDValue DstPtr = Op.getOperand(1);
2221 SDValue SrcPtr = Op.getOperand(2);
2222 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2223 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2226 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
2227 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
2228 /*isTailCall*/false,
2229 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
2232 SDValue SystemZTargetLowering::
2233 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
2234 SDValue Chain = Op.getOperand(0);
2235 SDValue Size = Op.getOperand(1);
2238 unsigned SPReg = getStackPointerRegisterToSaveRestore();
2240 // Get a reference to the stack pointer.
2241 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
2243 // Get the new stack pointer value.
2244 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);
2246 // Copy the new stack pointer back.
2247 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
2249 // The allocated data lives above the 160 bytes allocated for the standard
2250 // frame, plus any outgoing stack arguments. We don't know how much that
2251 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
2252 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
2253 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
2255 SDValue Ops[2] = { Result, Chain };
2256 return DAG.getMergeValues(Ops, DL);
2259 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
2260 SelectionDAG &DAG) const {
2261 EVT VT = Op.getValueType();
2265 // Just do a normal 64-bit multiplication and extract the results.
2266 // We define this so that it can be used for constant division.
2267 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
2268 Op.getOperand(1), Ops[1], Ops[0]);
2270 // Do a full 128-bit multiplication based on UMUL_LOHI64:
2272 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
2274 // but using the fact that the upper halves are either all zeros or all ones:
2277 //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
2279 // and grouping the right terms together since they are quicker than the multiplication:
2282 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
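// LH and RH below are the sign masks of LL and RL (arithmetic shifts right by
// 63), so each is either 0 or all ones. Multiplying by 0 or -1 is the same as
// ANDing and negating, which is why the added terms in the first formula
// become the subtracted terms in the last one.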
2283 SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
2284 SDValue LL = Op.getOperand(0);
2285 SDValue RL = Op.getOperand(1);
2286 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
2287 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
2288 // UMUL_LOHI64 returns the low result in the odd register and the high
2289 // result in the even register. SMUL_LOHI is defined to return the
2290 // low half first, so the results are in reverse order.
2291 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
2292 LL, RL, Ops[1], Ops[0]);
2293 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
2294 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
2295 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
2296 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
2298 return DAG.getMergeValues(Ops, DL);
2301 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
2302 SelectionDAG &DAG) const {
2303 EVT VT = Op.getValueType();
2307 // Just do a normal 64-bit multiplication and extract the results.
2308 // We define this so that it can be used for constant division.
2309 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
2310 Op.getOperand(1), Ops[1], Ops[0]);
2312 // UMUL_LOHI64 returns the low result in the odd register and the high
2313 // result in the even register. UMUL_LOHI is defined to return the
2314 // low half first, so the results are in reverse order.
2315 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
2316 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
2317 return DAG.getMergeValues(Ops, DL);
2320 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
2321 SelectionDAG &DAG) const {
2322 SDValue Op0 = Op.getOperand(0);
2323 SDValue Op1 = Op.getOperand(1);
2324 EVT VT = Op.getValueType();
2328 // We use DSGF for 32-bit division.
2330 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
2331 Opcode = SystemZISD::SDIVREM32;
2332 } else if (DAG.ComputeNumSignBits(Op1) > 32) {
2333 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
2334 Opcode = SystemZISD::SDIVREM32;
2336 Opcode = SystemZISD::SDIVREM64;
2338 // DSG(F) takes a 64-bit dividend, so the even register in the GR128
2339 // input is "don't care". The instruction returns the remainder in
2340 // the even register and the quotient in the odd register.
2342 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
2343 Op0, Op1, Ops[1], Ops[0]);
2344 return DAG.getMergeValues(Ops, DL);
2347 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
2348 SelectionDAG &DAG) const {
2349 EVT VT = Op.getValueType();
2352 // DL(G) uses a double-width dividend, so we need to clear the even
2353 // register in the GR128 input. The instruction returns the remainder
2354 // in the even register and the quotient in the odd register.
2357 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
2358 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
2360 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
2361 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
2362 return DAG.getMergeValues(Ops, DL);
2365 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
2366 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
2368 // Get the known-zero masks for each operand.
2369 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
2370 APInt KnownZero[2], KnownOne[2];
2371 DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
2372 DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
2374 // See if the upper 32 bits of one operand and the lower 32 bits of the
2375 // other are known zero. They are the low and high operands respectively.
2376 uint64_t Masks[] = { KnownZero[0].getZExtValue(),
2377 KnownZero[1].getZExtValue() };
2379 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
2381 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
2386 SDValue LowOp = Ops[Low];
2387 SDValue HighOp = Ops[High];
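// For example (illustrative): in "(zext i32 %a to i64) | (%b << 32)" the first
// operand has its upper 32 bits known zero and the second its lower 32 bits,
// so %a becomes LowOp and the shifted %b becomes HighOp.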
2389 // If the high part is a constant, we're better off using IILH.
2390 if (HighOp.getOpcode() == ISD::Constant)
2393 // If the low part is a constant that is outside the range of LHI,
2394 // then we're better off using IILF.
2395 if (LowOp.getOpcode() == ISD::Constant) {
2396 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
2397 if (!isInt<16>(Value))
2401 // Check whether the high part is an AND that doesn't change the
2402 // high 32 bits and just masks out low bits. We can skip it if so.
2403 if (HighOp.getOpcode() == ISD::AND &&
2404 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
2405 SDValue HighOp0 = HighOp.getOperand(0);
2406 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
2407 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
2411 // Take advantage of the fact that all GR32 operations only change the
2412 // low 32 bits by truncating Low to an i32 and inserting it directly
2413 // using a subreg. The interesting cases are those where the truncation can be folded.
2416 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
2417 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
2418 MVT::i64, HighOp, Low32);
2421 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
2422 SelectionDAG &DAG) const {
2423 EVT VT = Op.getValueType();
2424 int64_t OrigBitSize = VT.getSizeInBits();
2427 // Get the known-zero mask for the operand.
2428 Op = Op.getOperand(0);
2429 APInt KnownZero, KnownOne;
2430 DAG.computeKnownBits(Op, KnownZero, KnownOne);
2431 unsigned NumSignificantBits = (~KnownZero).getActiveBits();
2432 if (NumSignificantBits == 0)
2433 return DAG.getConstant(0, DL, VT);
2435 // Skip known-zero high parts of the operand.
2436 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
2437 BitSize = std::min(BitSize, OrigBitSize);
2439 // The POPCNT instruction counts the number of 1 bits in each byte.
2440 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
2441 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
2442 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
2444 // Add up per-byte counts in a binary tree. All bits of Op at
2445 // position larger than BitSize remain zero throughout.
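// For example (illustrative): for an i32 input of 0x01010101, POPCNT yields
// per-byte counts 0x01010101; after the shift-and-add steps with I == 16 and
// I == 8 the value is 0x04030201, and the final shift below extracts the
// total (4) from the top byte.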
2446 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
2447 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
2448 if (BitSize != OrigBitSize)
2449 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
2450 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
2451 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
2454 // Extract overall result from high byte.
2456 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
2457 DAG.getConstant(BitSize - 8, DL, VT));
2462 // Op is an atomic load. Lower it into a normal volatile load.
2463 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
2464 SelectionDAG &DAG) const {
2465 auto *Node = cast<AtomicSDNode>(Op.getNode());
2466 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
2467 Node->getChain(), Node->getBasePtr(),
2468 Node->getMemoryVT(), Node->getMemOperand());
2471 // Op is an atomic store. Lower it into a normal volatile store followed
2472 // by a serialization.
2473 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
2474 SelectionDAG &DAG) const {
2475 auto *Node = cast<AtomicSDNode>(Op.getNode());
2476 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
2477 Node->getBasePtr(), Node->getMemoryVT(),
2478 Node->getMemOperand());
2479 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
2483 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
2484 // two into the fullword ATOMIC_LOADW_* operation given by Opcode.
2485 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
2487 unsigned Opcode) const {
2488 auto *Node = cast<AtomicSDNode>(Op.getNode());
2490 // 32-bit operations need no code outside the main loop.
2491 EVT NarrowVT = Node->getMemoryVT();
2492 EVT WideVT = MVT::i32;
2493 if (NarrowVT == WideVT)
2496 int64_t BitSize = NarrowVT.getSizeInBits();
2497 SDValue ChainIn = Node->getChain();
2498 SDValue Addr = Node->getBasePtr();
2499 SDValue Src2 = Node->getVal();
2500 MachineMemOperand *MMO = Node->getMemOperand();
2502 EVT PtrVT = Addr.getValueType();
2504 // Convert atomic subtracts of constants into additions.
2505 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
2506 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
2507 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
2508 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
2511 // Get the address of the containing word.
2512 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
2513 DAG.getConstant(-4, DL, PtrVT));
2515 // Get the number of bits that the word must be rotated left in order
2516 // to bring the field to the top bits of a GR32.
2517 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
2518 DAG.getConstant(3, DL, PtrVT));
2519 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
2521 // Get the complementing shift amount, for rotating a field in the top
2522 // bits back to its proper position.
2523 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
2524 DAG.getConstant(0, DL, WideVT), BitShift);
2526 // Extend the source operand to 32 bits and prepare it for the inner loop.
2527 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
2528 // operations require the source to be shifted in advance. (This shift
2529 // can be folded if the source is constant.) For AND and NAND, the lower
2530 // bits must be set, while for other opcodes they should be left clear.
2531 if (Opcode != SystemZISD::ATOMIC_SWAPW)
2532 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
2533 DAG.getConstant(32 - BitSize, DL, WideVT));
2534 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
2535 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
2536 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
2537 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
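// For example (illustrative): for an 8-bit operation BitSize is 8, so Src2 is
// shifted left by 24 to occupy the top byte, and for AND/NAND the remaining
// low 24 bits are filled with ones (0x00ffffff) so that they leave the rest
// of the containing word unchanged.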
2539 // Construct the ATOMIC_LOADW_* node.
2540 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
2541 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
2542 DAG.getConstant(BitSize, DL, WideVT) };
2543 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
2546 // Rotate the result of the final CS so that the field is in the lower
2547 // bits of a GR32, then truncate it.
2548 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
2549 DAG.getConstant(BitSize, DL, WideVT));
2550 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
2552 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
2553 return DAG.getMergeValues(RetOps, DL);
2556 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
2557 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
2558 // operations into additions.
2559 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
2560 SelectionDAG &DAG) const {
2561 auto *Node = cast<AtomicSDNode>(Op.getNode());
2562 EVT MemVT = Node->getMemoryVT();
2563 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
2564 // A full-width operation.
2565 assert(Op.getValueType() == MemVT && "Mismatched VTs");
2566 SDValue Src2 = Node->getVal();
2570 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
2571 // Use an addition if the operand is constant and either LAA(G) is
2572 // available or the negative value is in the range of A(G)FHI.
2573 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
2574 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
2575 NegSrc2 = DAG.getConstant(Value, DL, MemVT);
2576 } else if (Subtarget.hasInterlockedAccess1())
2577 // Use LAA(G) if available.
2578 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
2581 if (NegSrc2.getNode())
2582 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
2583 Node->getChain(), Node->getBasePtr(), NegSrc2,
2584 Node->getMemOperand(), Node->getOrdering(),
2585 Node->getSynchScope());
2587 // Use the node as-is.
2591 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
2594 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower it
2595 // into a fullword ATOMIC_CMP_SWAPW operation.
2596 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
2597 SelectionDAG &DAG) const {
2598 auto *Node = cast<AtomicSDNode>(Op.getNode());
2600 // We have native support for 32-bit compare and swap.
2601 EVT NarrowVT = Node->getMemoryVT();
2602 EVT WideVT = MVT::i32;
2603 if (NarrowVT == WideVT)
2606 int64_t BitSize = NarrowVT.getSizeInBits();
2607 SDValue ChainIn = Node->getOperand(0);
2608 SDValue Addr = Node->getOperand(1);
2609 SDValue CmpVal = Node->getOperand(2);
2610 SDValue SwapVal = Node->getOperand(3);
2611 MachineMemOperand *MMO = Node->getMemOperand();
2613 EVT PtrVT = Addr.getValueType();
2615 // Get the address of the containing word.
2616 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
2617 DAG.getConstant(-4, DL, PtrVT));
2619 // Get the number of bits that the word must be rotated left in order
2620 // to bring the field to the top bits of a GR32.
2621 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
2622 DAG.getConstant(3, DL, PtrVT));
2623 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
2625 // Get the complementing shift amount, for rotating a field in the top
2626 // bits back to its proper position.
2627 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
2628 DAG.getConstant(0, DL, WideVT), BitShift);
2630 // Construct the ATOMIC_CMP_SWAPW node.
2631 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
2632 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
2633 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
2634 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
2635 VTList, Ops, NarrowVT, MMO);
2639 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
2640 SelectionDAG &DAG) const {
2641 MachineFunction &MF = DAG.getMachineFunction();
2642 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
2643 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
2644 SystemZ::R15D, Op.getValueType());
2647 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
2648 SelectionDAG &DAG) const {
2649 MachineFunction &MF = DAG.getMachineFunction();
2650 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
2651 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
2652 SystemZ::R15D, Op.getOperand(1));
2655 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
2656 SelectionDAG &DAG) const {
2657 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2659 // Just preserve the chain.
2660 return Op.getOperand(0);
2663 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2664 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
2665 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
2668 DAG.getConstant(Code, DL, MVT::i32),
2671 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
2672 Node->getVTList(), Ops,
2673 Node->getMemoryVT(), Node->getMemOperand());
2676 // Return an i32 that contains the value of CC immediately after After,
2677 // whose final operand must be MVT::Glue.
2678 static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) {
2680 SDValue Glue = SDValue(After, After->getNumValues() - 1);
2681 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
2682 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
2683 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
2687 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
2688 SelectionDAG &DAG) const {
2689 unsigned Opcode, CCValid;
2690 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
2691 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
2692 SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode);
2693 SDValue CC = getCCResult(DAG, Glued.getNode());
2694 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
2701 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
2702 SelectionDAG &DAG) const {
2703 switch (Op.getOpcode()) {
2705 return lowerBR_CC(Op, DAG);
2706 case ISD::SELECT_CC:
2707 return lowerSELECT_CC(Op, DAG);
2709 return lowerSETCC(Op, DAG);
2710 case ISD::GlobalAddress:
2711 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
2712 case ISD::GlobalTLSAddress:
2713 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
2714 case ISD::BlockAddress:
2715 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
2716 case ISD::JumpTable:
2717 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
2718 case ISD::ConstantPool:
2719 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
2721 return lowerBITCAST(Op, DAG);
2723 return lowerVASTART(Op, DAG);
2725 return lowerVACOPY(Op, DAG);
2726 case ISD::DYNAMIC_STACKALLOC:
2727 return lowerDYNAMIC_STACKALLOC(Op, DAG);
2728 case ISD::SMUL_LOHI:
2729 return lowerSMUL_LOHI(Op, DAG);
2730 case ISD::UMUL_LOHI:
2731 return lowerUMUL_LOHI(Op, DAG);
2733 return lowerSDIVREM(Op, DAG);
2735 return lowerUDIVREM(Op, DAG);
2737 return lowerOR(Op, DAG);
2739 return lowerCTPOP(Op, DAG);
2740 case ISD::ATOMIC_SWAP:
2741 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
2742 case ISD::ATOMIC_STORE:
2743 return lowerATOMIC_STORE(Op, DAG);
2744 case ISD::ATOMIC_LOAD:
2745 return lowerATOMIC_LOAD(Op, DAG);
2746 case ISD::ATOMIC_LOAD_ADD:
2747 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
2748 case ISD::ATOMIC_LOAD_SUB:
2749 return lowerATOMIC_LOAD_SUB(Op, DAG);
2750 case ISD::ATOMIC_LOAD_AND:
2751 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
2752 case ISD::ATOMIC_LOAD_OR:
2753 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
2754 case ISD::ATOMIC_LOAD_XOR:
2755 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
2756 case ISD::ATOMIC_LOAD_NAND:
2757 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
2758 case ISD::ATOMIC_LOAD_MIN:
2759 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
2760 case ISD::ATOMIC_LOAD_MAX:
2761 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
2762 case ISD::ATOMIC_LOAD_UMIN:
2763 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
2764 case ISD::ATOMIC_LOAD_UMAX:
2765 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
2766 case ISD::ATOMIC_CMP_SWAP:
2767 return lowerATOMIC_CMP_SWAP(Op, DAG);
2768 case ISD::STACKSAVE:
2769 return lowerSTACKSAVE(Op, DAG);
2770 case ISD::STACKRESTORE:
2771 return lowerSTACKRESTORE(Op, DAG);
2773 return lowerPREFETCH(Op, DAG);
2774 case ISD::INTRINSIC_W_CHAIN:
2775 return lowerINTRINSIC_W_CHAIN(Op, DAG);
2777 llvm_unreachable("Unexpected node to lower");
2781 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
2782 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
2787 OPCODE(PCREL_WRAPPER);
2788 OPCODE(PCREL_OFFSET);
2794 OPCODE(SELECT_CCMASK);
2795 OPCODE(ADJDYNALLOC);
2796 OPCODE(EXTRACT_ACCESS);
2797 OPCODE(UMUL_LOHI64);
2813 OPCODE(SEARCH_STRING);
2817 OPCODE(TBEGIN_NOFLOAT);
2819 OPCODE(ATOMIC_SWAPW);
2820 OPCODE(ATOMIC_LOADW_ADD);
2821 OPCODE(ATOMIC_LOADW_SUB);
2822 OPCODE(ATOMIC_LOADW_AND);
2823 OPCODE(ATOMIC_LOADW_OR);
2824 OPCODE(ATOMIC_LOADW_XOR);
2825 OPCODE(ATOMIC_LOADW_NAND);
2826 OPCODE(ATOMIC_LOADW_MIN);
2827 OPCODE(ATOMIC_LOADW_MAX);
2828 OPCODE(ATOMIC_LOADW_UMIN);
2829 OPCODE(ATOMIC_LOADW_UMAX);
2830 OPCODE(ATOMIC_CMP_SWAPW);
2837 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
2838 DAGCombinerInfo &DCI) const {
2839 SelectionDAG &DAG = DCI.DAG;
2840 unsigned Opcode = N->getOpcode();
2841 if (Opcode == ISD::SIGN_EXTEND) {
2842 // Convert (sext (ashr (shl X, C1), C2)) to
2843 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
2844 // cheap as narrower ones.
2845 SDValue N0 = N->getOperand(0);
2846 EVT VT = N->getValueType(0);
2847 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
2848 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2849 SDValue Inner = N0.getOperand(0);
2850 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
2851 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
2852 unsigned Extra = (VT.getSizeInBits() -
2853 N0.getValueType().getSizeInBits());
2854 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
2855 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
2856 EVT ShiftVT = N0.getOperand(1).getValueType();
2857 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
2858 Inner.getOperand(0));
2859 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
2860 DAG.getConstant(NewShlAmt, SDLoc(Inner),
2862 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
2863 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
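// For example (illustrative): (sext i64 (sra (shl X, 24), 24):i32) becomes
// (sra (shl (anyext X), 56), 56), since Extra is 32 and both shift amounts
// grow by that much.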
2871 //===----------------------------------------------------------------------===//
2872 // Custom insertion
2873 //===----------------------------------------------------------------------===//
2875 // Create a new basic block after MBB.
2876 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
2877 MachineFunction &MF = *MBB->getParent();
2878 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
2879 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
2883 // Split MBB after MI and return the new block (the one that contains
2884 // instructions after MI).
2885 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
2886 MachineBasicBlock *MBB) {
2887 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2888 NewMBB->splice(NewMBB->begin(), MBB,
2889 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2890 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2894 // Split MBB before MI and return the new block (the one that contains MI).
2895 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
2896 MachineBasicBlock *MBB) {
2897 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2898 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2899 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2903 // Force base value Base into a register before MI. Return the register.
2904 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
2905 const SystemZInstrInfo *TII) {
2907 return Base.getReg();
2909 MachineBasicBlock *MBB = MI->getParent();
2910 MachineFunction &MF = *MBB->getParent();
2911 MachineRegisterInfo &MRI = MF.getRegInfo();
2913 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2914 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
2915 .addOperand(Base).addImm(0).addReg(0);
2919 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
2921 SystemZTargetLowering::emitSelect(MachineInstr *MI,
2922 MachineBasicBlock *MBB) const {
2923 const SystemZInstrInfo *TII =
2924 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
2926 unsigned DestReg = MI->getOperand(0).getReg();
2927 unsigned TrueReg = MI->getOperand(1).getReg();
2928 unsigned FalseReg = MI->getOperand(2).getReg();
2929 unsigned CCValid = MI->getOperand(3).getImm();
2930 unsigned CCMask = MI->getOperand(4).getImm();
2931 DebugLoc DL = MI->getDebugLoc();
2933 MachineBasicBlock *StartMBB = MBB;
2934 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
2935 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
2938 // BRC CCMask, JoinMBB
2939 // # fallthrough to FalseMBB
2941 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2942 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
2943 MBB->addSuccessor(JoinMBB);
2944 MBB->addSuccessor(FalseMBB);
2947 // # fallthrough to JoinMBB
2949 MBB->addSuccessor(JoinMBB);
2952 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
2955 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
2956 .addReg(TrueReg).addMBB(StartMBB)
2957 .addReg(FalseReg).addMBB(FalseMBB);
2959 MI->eraseFromParent();
2963 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
2964 // StoreOpcode is the store to use and Invert says whether the store should
2965 // happen when the condition is false rather than true. If a STORE ON
2966 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
2968 SystemZTargetLowering::emitCondStore(MachineInstr *MI,
2969 MachineBasicBlock *MBB,
2970 unsigned StoreOpcode, unsigned STOCOpcode,
2971 bool Invert) const {
2972 const SystemZInstrInfo *TII =
2973 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
2975 unsigned SrcReg = MI->getOperand(0).getReg();
2976 MachineOperand Base = MI->getOperand(1);
2977 int64_t Disp = MI->getOperand(2).getImm();
2978 unsigned IndexReg = MI->getOperand(3).getReg();
2979 unsigned CCValid = MI->getOperand(4).getImm();
2980 unsigned CCMask = MI->getOperand(5).getImm();
2981 DebugLoc DL = MI->getDebugLoc();
2983 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
2985 // Use STOCOpcode if possible. We could use different store patterns in
2986 // order to avoid matching the index register, but the performance trade-offs
2987 // might be more complicated in that case.
2988 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
2991 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
2992 .addReg(SrcReg).addOperand(Base).addImm(Disp)
2993 .addImm(CCValid).addImm(CCMask);
2994 MI->eraseFromParent();
2998 // Get the condition needed to branch around the store.
3002 MachineBasicBlock *StartMBB = MBB;
3003 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
3004 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
3007 // BRC CCMask, JoinMBB
3008 // # fallthrough to FalseMBB
3010 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3011 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
3012 MBB->addSuccessor(JoinMBB);
3013 MBB->addSuccessor(FalseMBB);
3016 // store %SrcReg, %Disp(%Index,%Base)
3017 // # fallthrough to JoinMBB
3019 BuildMI(MBB, DL, TII->get(StoreOpcode))
3020 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
3021 MBB->addSuccessor(JoinMBB);
3023 MI->eraseFromParent();
3027 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
3028 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
3029 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
3030 // BitSize is the width of the field in bits, or 0 if this is a partword
3031 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
3032 // is one of the operands. Invert says whether the field should be
3033 // inverted after performing BinOpcode (e.g. for NAND).
3035 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
3036 MachineBasicBlock *MBB,
3039 bool Invert) const {
3040 MachineFunction &MF = *MBB->getParent();
3041 const SystemZInstrInfo *TII =
3042 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
3043 MachineRegisterInfo &MRI = MF.getRegInfo();
3044 bool IsSubWord = (BitSize < 32);
3046 // Extract the operands. Base can be a register or a frame index.
3047 // Src2 can be a register or immediate.
3048 unsigned Dest = MI->getOperand(0).getReg();
3049 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
3050 int64_t Disp = MI->getOperand(2).getImm();
3051 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
3052 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
3053 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
3054 DebugLoc DL = MI->getDebugLoc();
3056 BitSize = MI->getOperand(6).getImm();
3058 // Subword operations use 32-bit registers.
3059 const TargetRegisterClass *RC = (BitSize <= 32 ?
3060 &SystemZ::GR32BitRegClass :
3061 &SystemZ::GR64BitRegClass);
3062 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
3063 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
3065 // Get the right opcodes for the displacement.
3066 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
3067 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
3068 assert(LOpcode && CSOpcode && "Displacement out of range");
3070 // Create virtual registers for temporary results.
3071 unsigned OrigVal = MRI.createVirtualRegister(RC);
3072 unsigned OldVal = MRI.createVirtualRegister(RC);
3073 unsigned NewVal = (BinOpcode || IsSubWord ?
3074 MRI.createVirtualRegister(RC) : Src2.getReg());
3075 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
3076 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
3078 // Insert a basic block for the main loop.
3079 MachineBasicBlock *StartMBB = MBB;
3080 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
3081 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
3085 // %OrigVal = L Disp(%Base)
3086 // # fall through to LoopMBB
3088 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
3089 .addOperand(Base).addImm(Disp).addReg(0);
3090 MBB->addSuccessor(LoopMBB);
3093 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
3094 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
3095 // %RotatedNewVal = OP %RotatedOldVal, %Src2
3096 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
3097 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
3099 // # fall through to DoneMBB
3101 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
3102 .addReg(OrigVal).addMBB(StartMBB)
3103 .addReg(Dest).addMBB(LoopMBB);
3105 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
3106 .addReg(OldVal).addReg(BitShift).addImm(0);
3108 // Perform the operation normally and then invert every bit of the field.
3109 unsigned Tmp = MRI.createVirtualRegister(RC);
3110 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
3111 .addReg(RotatedOldVal).addOperand(Src2);
3113 // XILF with the upper BitSize bits set.
3114 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
3115 .addReg(Tmp).addImm(-1U << (32 - BitSize));
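// For example (illustrative): with BitSize == 8 the immediate is 0xff000000,
// so only the rotated field in the top byte is inverted.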
3117 // Use LCGR and add -1 to the result, which is more compact than
3118 // an XILF, XILH pair.
3119 unsigned Tmp2 = MRI.createVirtualRegister(RC);
3120 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
3121 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
3122 .addReg(Tmp2).addImm(-1);
3124 } else if (BinOpcode)
3125 // A simple binary operation.
3126 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
3127 .addReg(RotatedOldVal).addOperand(Src2);
3129 // Use RISBG to rotate Src2 into position and use it to replace the
3130 // field in RotatedOldVal.
3131 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
3132 .addReg(RotatedOldVal).addReg(Src2.getReg())
3133 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
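// For example (illustrative): with BitSize == 16 this rotates Src2 left by 16
// and inserts bit positions 32 to 47, i.e. it replaces the top halfword of
// RotatedOldVal with the low halfword of Src2.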
3135 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
3136 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
3137 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
3138 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
3139 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3140 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
3141 MBB->addSuccessor(LoopMBB);
3142 MBB->addSuccessor(DoneMBB);
3144 MI->eraseFromParent();
3148 // Implement EmitInstrWithCustomInserter for pseudo
3149 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
3150 // instruction that should be used to compare the current field with the
3151 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
3152 // for when the current field should be kept. BitSize is the width of
3153 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
3155 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
3156 MachineBasicBlock *MBB,
3157 unsigned CompareOpcode,
3158 unsigned KeepOldMask,
3159 unsigned BitSize) const {
3160 MachineFunction &MF = *MBB->getParent();
3161 const SystemZInstrInfo *TII =
3162 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
3163 MachineRegisterInfo &MRI = MF.getRegInfo();
3164 bool IsSubWord = (BitSize < 32);
3166 // Extract the operands. Base can be a register or a frame index.
3167 unsigned Dest = MI->getOperand(0).getReg();
3168 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
3169 int64_t Disp = MI->getOperand(2).getImm();
3170 unsigned Src2 = MI->getOperand(3).getReg();
3171 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
3172 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
3173 DebugLoc DL = MI->getDebugLoc();
3175 BitSize = MI->getOperand(6).getImm();
3177 // Subword operations use 32-bit registers.
3178 const TargetRegisterClass *RC = (BitSize <= 32 ?
3179 &SystemZ::GR32BitRegClass :
3180 &SystemZ::GR64BitRegClass);
3181 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
3182 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
3184 // Get the right opcodes for the displacement.
3185 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
3186 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
3187 assert(LOpcode && CSOpcode && "Displacement out of range");
3189 // Create virtual registers for temporary results.
3190 unsigned OrigVal = MRI.createVirtualRegister(RC);
3191 unsigned OldVal = MRI.createVirtualRegister(RC);
3192 unsigned NewVal = MRI.createVirtualRegister(RC);
3193 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
3194 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
3195 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
3197 // Insert 3 basic blocks for the loop.
3198 MachineBasicBlock *StartMBB = MBB;
3199 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
3200 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
3201 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
3202 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
3206 // %OrigVal = L Disp(%Base)
3207 // # fall through to LoopMBB
3209 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
3210 .addOperand(Base).addImm(Disp).addReg(0);
3211 MBB->addSuccessor(LoopMBB);
3214 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
3215 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
3216 // CompareOpcode %RotatedOldVal, %Src2
3217 // BRC KeepOldMask, UpdateMBB
3219 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
3220 .addReg(OrigVal).addMBB(StartMBB)
3221 .addReg(Dest).addMBB(UpdateMBB);
3223 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
3224 .addReg(OldVal).addReg(BitShift).addImm(0);
3225 BuildMI(MBB, DL, TII->get(CompareOpcode))
3226 .addReg(RotatedOldVal).addReg(Src2);
3227 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3228 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
3229 MBB->addSuccessor(UpdateMBB);
3230 MBB->addSuccessor(UseAltMBB);
3233 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
3234 // # fall through to UpdateMBB
3237 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
3238 .addReg(RotatedOldVal).addReg(Src2)
3239 .addImm(32).addImm(31 + BitSize).addImm(0);
3240 MBB->addSuccessor(UpdateMBB);
3243 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
3244 // [ %RotatedAltVal, UseAltMBB ]
3245 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
3246 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
3248 // # fall through to DoneMBB
3250 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
3251 .addReg(RotatedOldVal).addMBB(LoopMBB)
3252 .addReg(RotatedAltVal).addMBB(UseAltMBB);
3254 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
3255 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
3256 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
3257 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
3258 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3259 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
3260 MBB->addSuccessor(LoopMBB);
3261 MBB->addSuccessor(DoneMBB);
3263 MI->eraseFromParent();
  return DoneMBB;
}
3267 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
3270 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
3271 MachineBasicBlock *MBB) const {
3272 MachineFunction &MF = *MBB->getParent();
3273 const SystemZInstrInfo *TII =
3274 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
3275 MachineRegisterInfo &MRI = MF.getRegInfo();
3277 // Extract the operands. Base can be a register or a frame index.
3278 unsigned Dest = MI->getOperand(0).getReg();
3279 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
3280 int64_t Disp = MI->getOperand(2).getImm();
3281 unsigned OrigCmpVal = MI->getOperand(3).getReg();
3282 unsigned OrigSwapVal = MI->getOperand(4).getReg();
3283 unsigned BitShift = MI->getOperand(5).getReg();
3284 unsigned NegBitShift = MI->getOperand(6).getReg();
3285 int64_t BitSize = MI->getOperand(7).getImm();
3286 DebugLoc DL = MI->getDebugLoc();
3288 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
3290 // Get the right opcodes for the displacement.
3291 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
3292 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
3293 assert(LOpcode && CSOpcode && "Displacement out of range");
3295 // Create virtual registers for temporary results.
3296 unsigned OrigOldVal = MRI.createVirtualRegister(RC);
3297 unsigned OldVal = MRI.createVirtualRegister(RC);
3298 unsigned CmpVal = MRI.createVirtualRegister(RC);
3299 unsigned SwapVal = MRI.createVirtualRegister(RC);
3300 unsigned StoreVal = MRI.createVirtualRegister(RC);
3301 unsigned RetryOldVal = MRI.createVirtualRegister(RC);
3302 unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
3303 unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
3305 // Insert 2 basic blocks for the loop.
3306 MachineBasicBlock *StartMBB = MBB;
3307 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
3308 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
3309 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
3313 // %OrigOldVal = L Disp(%Base)
3314 // # fall through to LoopMBB
3316 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
3317 .addOperand(Base).addImm(Disp).addReg(0);
3318 MBB->addSuccessor(LoopMBB);
3321 // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
3322 // %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
3323 // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
3324 // %Dest = RLL %OldVal, BitSize(%BitShift)
3325 // ^^ The low BitSize bits contain the field
//    of interest.
3327 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
3328 // ^^ Replace the upper 32-BitSize bits of the
3329 // comparison value with those that we loaded,
3330 // so that we can use a full word comparison.
3331 // CR %Dest, %RetryCmpVal
3333 // # Fall through to SetMBB
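  // Illustrative example (not in the original source): with BitSize == 8 the
  // RISBG32 copies bits 32-55 of %Dest (the bytes outside the field) into
  // %RetryCmpVal, so only the low 8 bits still come from %CmpVal and the
  // following CR compares full 32-bit words that can differ only in the
  // field itself.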
3335 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
3336 .addReg(OrigOldVal).addMBB(StartMBB)
3337 .addReg(RetryOldVal).addMBB(SetMBB);
3338 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
3339 .addReg(OrigCmpVal).addMBB(StartMBB)
3340 .addReg(RetryCmpVal).addMBB(SetMBB);
3341 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
3342 .addReg(OrigSwapVal).addMBB(StartMBB)
3343 .addReg(RetrySwapVal).addMBB(SetMBB);
3344 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
3345 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
3346 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
3347 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
3348 BuildMI(MBB, DL, TII->get(SystemZ::CR))
3349 .addReg(Dest).addReg(RetryCmpVal);
3350 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3351 .addImm(SystemZ::CCMASK_ICMP)
3352 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
3353 MBB->addSuccessor(DoneMBB);
3354 MBB->addSuccessor(SetMBB);
3357 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
3358 // ^^ Replace the upper 32-BitSize bits of the new
3359 // value with those that we loaded.
3360 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
3361 // ^^ Rotate the new field to its proper position.
3362 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
3364 // # fall through to DoneMBB
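  // Illustrative note: the -BitSize displacement together with %NegBitShift
  // undoes the earlier "RLL %OldVal, BitSize(%BitShift)" rotation, so once
  // the upper bits have been merged in, the field is back in its memory
  // position before the CS attempts the store.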
3366 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
3367 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
3368 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
3369 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
3370 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
3371 .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
3372 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3373 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
3374 MBB->addSuccessor(LoopMBB);
3375 MBB->addSuccessor(DoneMBB);
3377 MI->eraseFromParent();
  return DoneMBB;
}
3381 // Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
3382 // if the high register of the GR128 value must be cleared or false if
3383 // it's "don't care". SubReg is subreg_l32 when extending a GR32
3384 // and subreg_l64 when extending a GR64.
MachineBasicBlock *
3386 SystemZTargetLowering::emitExt128(MachineInstr *MI,
3387 MachineBasicBlock *MBB,
3388 bool ClearEven, unsigned SubReg) const {
3389 MachineFunction &MF = *MBB->getParent();
3390 const SystemZInstrInfo *TII =
3391 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
3392 MachineRegisterInfo &MRI = MF.getRegInfo();
3393 DebugLoc DL = MI->getDebugLoc();
3395 unsigned Dest = MI->getOperand(0).getReg();
3396 unsigned Src = MI->getOperand(1).getReg();
3397 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
3399 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
3401 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
3402 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
3404 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
    .addImm(0);
3406 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
3407 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
3410 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
3411 .addReg(In128).addReg(Src).addImm(SubReg);
3413 MI->eraseFromParent();
  return MBB;
}
MachineBasicBlock *
3418 SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
3419 MachineBasicBlock *MBB,
3420 unsigned Opcode) const {
3421 MachineFunction &MF = *MBB->getParent();
3422 const SystemZInstrInfo *TII =
3423 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
3424 MachineRegisterInfo &MRI = MF.getRegInfo();
3425 DebugLoc DL = MI->getDebugLoc();
3427 MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
3428 uint64_t DestDisp = MI->getOperand(1).getImm();
3429 MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
3430 uint64_t SrcDisp = MI->getOperand(3).getImm();
3431 uint64_t Length = MI->getOperand(4).getImm();
3433 // When generating more than one CLC, all but the last will need to
3434 // branch to the end when a difference is found.
3435 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
3436 splitBlockAfter(MI, MBB) : nullptr);
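  // A single MVC/CLC-style instruction can cover at most 256 bytes (its
  // length field is 8 bits wide), so longer operations below are split into
  // 256-byte chunks, either as a loop or as straight-line code.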
3438 // Check for the loop form, in which operand 5 is the trip count.
3439 if (MI->getNumExplicitOperands() > 5) {
3440 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
3442 uint64_t StartCountReg = MI->getOperand(5).getReg();
3443 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
3444 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
3445 forceReg(MI, DestBase, TII));
3447 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
3448 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
3449 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
3450 MRI.createVirtualRegister(RC));
3451 uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
3452 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
3453 MRI.createVirtualRegister(RC));
3455 RC = &SystemZ::GR64BitRegClass;
3456 uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
3457 uint64_t NextCountReg = MRI.createVirtualRegister(RC);
3459 MachineBasicBlock *StartMBB = MBB;
3460 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
3461 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
3462 MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
3465 // # fall through to LoopMBB
3466 MBB->addSuccessor(LoopMBB);
3469 // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
3470 // [ %NextDestReg, NextMBB ]
3471 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
3472 // [ %NextSrcReg, NextMBB ]
3473 // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
3474 // [ %NextCountReg, NextMBB ]
3475 // ( PFD 2, 768+DestDisp(%ThisDestReg) )
3476 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
3479 // The prefetch is used only for MVC. The JLH is used only for CLC.
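  // The 768-byte prefetch displacement used below is simply three 256-byte
  // iterations ahead of the block currently being copied; this is a
  // heuristic, not an architectural requirement.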
3482 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
3483 .addReg(StartDestReg).addMBB(StartMBB)
3484 .addReg(NextDestReg).addMBB(NextMBB);
3485 if (!HaveSingleBase)
3486 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
3487 .addReg(StartSrcReg).addMBB(StartMBB)
3488 .addReg(NextSrcReg).addMBB(NextMBB);
3489 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
3490 .addReg(StartCountReg).addMBB(StartMBB)
3491 .addReg(NextCountReg).addMBB(NextMBB);
3492 if (Opcode == SystemZ::MVC)
3493 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
3494 .addImm(SystemZ::PFD_WRITE)
3495 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
3496 BuildMI(MBB, DL, TII->get(Opcode))
3497 .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
3498 .addReg(ThisSrcReg).addImm(SrcDisp);
3500 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3501 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
    .addMBB(EndMBB);
3503 MBB->addSuccessor(EndMBB);
3504 MBB->addSuccessor(NextMBB);
3508 // %NextDestReg = LA 256(%ThisDestReg)
3509 // %NextSrcReg = LA 256(%ThisSrcReg)
3510 // %NextCountReg = AGHI %ThisCountReg, -1
3511 // CGHI %NextCountReg, 0
3513 // # fall through to DoneMBB
3515 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
3518 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
3519 .addReg(ThisDestReg).addImm(256).addReg(0);
3520 if (!HaveSingleBase)
3521 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
3522 .addReg(ThisSrcReg).addImm(256).addReg(0);
3523 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
3524 .addReg(ThisCountReg).addImm(-1);
3525 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
3526 .addReg(NextCountReg).addImm(0);
3527 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3528 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
    .addMBB(LoopMBB);
3530 MBB->addSuccessor(LoopMBB);
3531 MBB->addSuccessor(DoneMBB);
3533 DestBase = MachineOperand::CreateReg(NextDestReg, false);
3534 SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
3538 // Handle any remaining bytes with straight-line code.
3539 while (Length > 0) {
3540 uint64_t ThisLength = std::min(Length, uint64_t(256));
3541 // The previous iteration might have created out-of-range displacements.
3542 // Apply them using LAY if so.
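  // These instructions only have a base register plus a 12-bit unsigned
  // displacement, so once DestDisp/SrcDisp grow past 4095 an LAY folds the
  // displacement into a fresh base register first.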
3543 if (!isUInt<12>(DestDisp)) {
3544 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
3545 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
3546 .addOperand(DestBase).addImm(DestDisp).addReg(0);
3547 DestBase = MachineOperand::CreateReg(Reg, false);
3550 if (!isUInt<12>(SrcDisp)) {
3551 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
3552 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
3553 .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
3554 SrcBase = MachineOperand::CreateReg(Reg, false);
3557 BuildMI(*MBB, MI, DL, TII->get(Opcode))
3558 .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
3559 .addOperand(SrcBase).addImm(SrcDisp);
3560 DestDisp += ThisLength;
3561 SrcDisp += ThisLength;
3562 Length -= ThisLength;
3563 // If there's another CLC to go, branch to the end if a difference
//    was found.
3565 if (EndMBB && Length > 0) {
3566 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
3567 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3568 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
    .addMBB(EndMBB);
3570 MBB->addSuccessor(EndMBB);
3571 MBB->addSuccessor(NextMBB);
3576 MBB->addSuccessor(EndMBB);
3578 MBB->addLiveIn(SystemZ::CC);
3581 MI->eraseFromParent();
  return MBB;
}
3585 // Decompose string pseudo-instruction MI into a loop that continually performs
3586 // Opcode until CC != 3.
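// Condition code 3 from CLST/MVST/SRST means the instruction stopped after
// processing a CPU-determined number of bytes and must be resumed, hence the
// branch back to LoopMBB below while CC == 3.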
MachineBasicBlock *
3588 SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
3589 MachineBasicBlock *MBB,
3590 unsigned Opcode) const {
3591 MachineFunction &MF = *MBB->getParent();
3592 const SystemZInstrInfo *TII =
3593 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
3594 MachineRegisterInfo &MRI = MF.getRegInfo();
3595 DebugLoc DL = MI->getDebugLoc();
3597 uint64_t End1Reg = MI->getOperand(0).getReg();
3598 uint64_t Start1Reg = MI->getOperand(1).getReg();
3599 uint64_t Start2Reg = MI->getOperand(2).getReg();
3600 uint64_t CharReg = MI->getOperand(3).getReg();
3602 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
3603 uint64_t This1Reg = MRI.createVirtualRegister(RC);
3604 uint64_t This2Reg = MRI.createVirtualRegister(RC);
3605 uint64_t End2Reg = MRI.createVirtualRegister(RC);
3607 MachineBasicBlock *StartMBB = MBB;
3608 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
3609 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
3612 // # fall through to LoopMBB
3613 MBB->addSuccessor(LoopMBB);
3616 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
3617 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
3619 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
3621 // # fall through to DoneMBB
3623 // The load of R0L can be hoisted by post-RA LICM.
3626 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
3627 .addReg(Start1Reg).addMBB(StartMBB)
3628 .addReg(End1Reg).addMBB(LoopMBB);
3629 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
3630 .addReg(Start2Reg).addMBB(StartMBB)
3631 .addReg(End2Reg).addMBB(LoopMBB);
3632 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
3633 BuildMI(MBB, DL, TII->get(Opcode))
3634 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
3635 .addReg(This1Reg).addReg(This2Reg);
3636 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
3637 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
3638 MBB->addSuccessor(LoopMBB);
3639 MBB->addSuccessor(DoneMBB);
3641 DoneMBB->addLiveIn(SystemZ::CC);
3643 MI->eraseFromParent();
  return DoneMBB;
}
3647 // Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *
3649 SystemZTargetLowering::emitTransactionBegin(MachineInstr *MI,
3650 MachineBasicBlock *MBB,
  unsigned Opcode,
3652 bool NoFloat) const {
3653 MachineFunction &MF = *MBB->getParent();
3654 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
3655 const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
3658 MI->setDesc(TII->get(Opcode));
3660 // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
3661 // Make sure to add the corresponding GRSM bits if they are missing.
3662 uint64_t Control = MI->getOperand(2).getImm();
3663 static const unsigned GPRControlBit[16] = {
3664 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
3665 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
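  // Each bit of the general-register save mask covers an even/odd pair of
  // GPRs, which is why adjacent entries in the table above share a value;
  // index 15 is the stack pointer's bit and index 11 the frame pointer's.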
3667 Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
3669 Control |= GPRControlBit[11];
3670 MI->getOperand(2).setImm(Control);
3672 // Add GPR clobbers.
3673 for (int I = 0; I < 16; I++) {
3674 if ((Control & GPRControlBit[I]) == 0) {
3675 unsigned Reg = SystemZMC::GR64Regs[I];
3676 MI->addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }
3680 // Add FPR clobbers.
3681 if (!NoFloat && (Control & 4) != 0) {
3682 for (int I = 0; I < 16; I++) {
3683 unsigned Reg = SystemZMC::FP64Regs[I];
3684 MI->addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }
  return MBB;
}
3691 MachineBasicBlock *SystemZTargetLowering::
3692 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
3693 switch (MI->getOpcode()) {
3694 case SystemZ::Select32Mux:
3695 case SystemZ::Select32:
3696 case SystemZ::SelectF32:
3697 case SystemZ::Select64:
3698 case SystemZ::SelectF64:
3699 case SystemZ::SelectF128:
3700 return emitSelect(MI, MBB);
3702 case SystemZ::CondStore8Mux:
3703 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
3704 case SystemZ::CondStore8MuxInv:
3705 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
3706 case SystemZ::CondStore16Mux:
3707 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
3708 case SystemZ::CondStore16MuxInv:
3709 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
3710 case SystemZ::CondStore8:
3711 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
3712 case SystemZ::CondStore8Inv:
3713 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
3714 case SystemZ::CondStore16:
3715 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
3716 case SystemZ::CondStore16Inv:
3717 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
3718 case SystemZ::CondStore32:
3719 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
3720 case SystemZ::CondStore32Inv:
3721 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
3722 case SystemZ::CondStore64:
3723 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
3724 case SystemZ::CondStore64Inv:
3725 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
3726 case SystemZ::CondStoreF32:
3727 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
3728 case SystemZ::CondStoreF32Inv:
3729 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
3730 case SystemZ::CondStoreF64:
3731 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
3732 case SystemZ::CondStoreF64Inv:
3733 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
3735 case SystemZ::AEXT128_64:
3736 return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
3737 case SystemZ::ZEXT128_32:
3738 return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
3739 case SystemZ::ZEXT128_64:
3740 return emitExt128(MI, MBB, true, SystemZ::subreg_l64);
3742 case SystemZ::ATOMIC_SWAPW:
3743 return emitAtomicLoadBinary(MI, MBB, 0, 0);
3744 case SystemZ::ATOMIC_SWAP_32:
3745 return emitAtomicLoadBinary(MI, MBB, 0, 32);
3746 case SystemZ::ATOMIC_SWAP_64:
3747 return emitAtomicLoadBinary(MI, MBB, 0, 64);
3749 case SystemZ::ATOMIC_LOADW_AR:
3750 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
3751 case SystemZ::ATOMIC_LOADW_AFI:
3752 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
3753 case SystemZ::ATOMIC_LOAD_AR:
3754 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
3755 case SystemZ::ATOMIC_LOAD_AHI:
3756 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
3757 case SystemZ::ATOMIC_LOAD_AFI:
3758 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
3759 case SystemZ::ATOMIC_LOAD_AGR:
3760 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
3761 case SystemZ::ATOMIC_LOAD_AGHI:
3762 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
3763 case SystemZ::ATOMIC_LOAD_AGFI:
3764 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
3766 case SystemZ::ATOMIC_LOADW_SR:
3767 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
3768 case SystemZ::ATOMIC_LOAD_SR:
3769 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
3770 case SystemZ::ATOMIC_LOAD_SGR:
3771 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
3773 case SystemZ::ATOMIC_LOADW_NR:
3774 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
3775 case SystemZ::ATOMIC_LOADW_NILH:
3776 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
3777 case SystemZ::ATOMIC_LOAD_NR:
3778 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
3779 case SystemZ::ATOMIC_LOAD_NILL:
3780 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
3781 case SystemZ::ATOMIC_LOAD_NILH:
3782 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
3783 case SystemZ::ATOMIC_LOAD_NILF:
3784 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
3785 case SystemZ::ATOMIC_LOAD_NGR:
3786 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
3787 case SystemZ::ATOMIC_LOAD_NILL64:
3788 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
3789 case SystemZ::ATOMIC_LOAD_NILH64:
3790 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
3791 case SystemZ::ATOMIC_LOAD_NIHL64:
3792 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
3793 case SystemZ::ATOMIC_LOAD_NIHH64:
3794 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
3795 case SystemZ::ATOMIC_LOAD_NILF64:
3796 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
3797 case SystemZ::ATOMIC_LOAD_NIHF64:
3798 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
3800 case SystemZ::ATOMIC_LOADW_OR:
3801 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
3802 case SystemZ::ATOMIC_LOADW_OILH:
3803 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
3804 case SystemZ::ATOMIC_LOAD_OR:
3805 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
3806 case SystemZ::ATOMIC_LOAD_OILL:
3807 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
3808 case SystemZ::ATOMIC_LOAD_OILH:
3809 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
3810 case SystemZ::ATOMIC_LOAD_OILF:
3811 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
3812 case SystemZ::ATOMIC_LOAD_OGR:
3813 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
3814 case SystemZ::ATOMIC_LOAD_OILL64:
3815 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
3816 case SystemZ::ATOMIC_LOAD_OILH64:
3817 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
3818 case SystemZ::ATOMIC_LOAD_OIHL64:
3819 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
3820 case SystemZ::ATOMIC_LOAD_OIHH64:
3821 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
3822 case SystemZ::ATOMIC_LOAD_OILF64:
3823 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
3824 case SystemZ::ATOMIC_LOAD_OIHF64:
3825 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
3827 case SystemZ::ATOMIC_LOADW_XR:
3828 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
3829 case SystemZ::ATOMIC_LOADW_XILF:
3830 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
3831 case SystemZ::ATOMIC_LOAD_XR:
3832 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
3833 case SystemZ::ATOMIC_LOAD_XILF:
3834 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
3835 case SystemZ::ATOMIC_LOAD_XGR:
3836 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
3837 case SystemZ::ATOMIC_LOAD_XILF64:
3838 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
3839 case SystemZ::ATOMIC_LOAD_XIHF64:
3840 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
3842 case SystemZ::ATOMIC_LOADW_NRi:
3843 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
3844 case SystemZ::ATOMIC_LOADW_NILHi:
3845 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
3846 case SystemZ::ATOMIC_LOAD_NRi:
3847 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
3848 case SystemZ::ATOMIC_LOAD_NILLi:
3849 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
3850 case SystemZ::ATOMIC_LOAD_NILHi:
3851 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
3852 case SystemZ::ATOMIC_LOAD_NILFi:
3853 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
3854 case SystemZ::ATOMIC_LOAD_NGRi:
3855 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
3856 case SystemZ::ATOMIC_LOAD_NILL64i:
3857 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
3858 case SystemZ::ATOMIC_LOAD_NILH64i:
3859 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
3860 case SystemZ::ATOMIC_LOAD_NIHL64i:
3861 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
3862 case SystemZ::ATOMIC_LOAD_NIHH64i:
3863 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
3864 case SystemZ::ATOMIC_LOAD_NILF64i:
3865 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
3866 case SystemZ::ATOMIC_LOAD_NIHF64i:
3867 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
3869 case SystemZ::ATOMIC_LOADW_MIN:
3870 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
3871 SystemZ::CCMASK_CMP_LE, 0);
3872 case SystemZ::ATOMIC_LOAD_MIN_32:
3873 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
3874 SystemZ::CCMASK_CMP_LE, 32);
3875 case SystemZ::ATOMIC_LOAD_MIN_64:
3876 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
3877 SystemZ::CCMASK_CMP_LE, 64);
3879 case SystemZ::ATOMIC_LOADW_MAX:
3880 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
3881 SystemZ::CCMASK_CMP_GE, 0);
3882 case SystemZ::ATOMIC_LOAD_MAX_32:
3883 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
3884 SystemZ::CCMASK_CMP_GE, 32);
3885 case SystemZ::ATOMIC_LOAD_MAX_64:
3886 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
3887 SystemZ::CCMASK_CMP_GE, 64);
3889 case SystemZ::ATOMIC_LOADW_UMIN:
3890 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
3891 SystemZ::CCMASK_CMP_LE, 0);
3892 case SystemZ::ATOMIC_LOAD_UMIN_32:
3893 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
3894 SystemZ::CCMASK_CMP_LE, 32);
3895 case SystemZ::ATOMIC_LOAD_UMIN_64:
3896 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
3897 SystemZ::CCMASK_CMP_LE, 64);
3899 case SystemZ::ATOMIC_LOADW_UMAX:
3900 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
3901 SystemZ::CCMASK_CMP_GE, 0);
3902 case SystemZ::ATOMIC_LOAD_UMAX_32:
3903 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
3904 SystemZ::CCMASK_CMP_GE, 32);
3905 case SystemZ::ATOMIC_LOAD_UMAX_64:
3906 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
3907 SystemZ::CCMASK_CMP_GE, 64);
3909 case SystemZ::ATOMIC_CMP_SWAPW:
3910 return emitAtomicCmpSwapW(MI, MBB);
3911 case SystemZ::MVCSequence:
3912 case SystemZ::MVCLoop:
3913 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
3914 case SystemZ::NCSequence:
3915 case SystemZ::NCLoop:
3916 return emitMemMemWrapper(MI, MBB, SystemZ::NC);
3917 case SystemZ::OCSequence:
3918 case SystemZ::OCLoop:
3919 return emitMemMemWrapper(MI, MBB, SystemZ::OC);
3920 case SystemZ::XCSequence:
3921 case SystemZ::XCLoop:
3922 return emitMemMemWrapper(MI, MBB, SystemZ::XC);
3923 case SystemZ::CLCSequence:
3924 case SystemZ::CLCLoop:
3925 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
3926 case SystemZ::CLSTLoop:
3927 return emitStringWrapper(MI, MBB, SystemZ::CLST);
3928 case SystemZ::MVSTLoop:
3929 return emitStringWrapper(MI, MBB, SystemZ::MVST);
3930 case SystemZ::SRSTLoop:
3931 return emitStringWrapper(MI, MBB, SystemZ::SRST);
3932 case SystemZ::TBEGIN:
3933 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
3934 case SystemZ::TBEGIN_nofloat:
3935 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
3936 case SystemZ::TBEGINC:
3937 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  default:
3939 llvm_unreachable("Unexpected instr type to insert");
  }
}