//===--- HexagonBitTracker.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #include "llvm/CodeGen/MachineRegisterInfo.h"
11 #include "llvm/IR/Module.h"
12 #include "llvm/Support/Debug.h"
13 #include "llvm/Support/raw_ostream.h"
16 #include "HexagonInstrInfo.h"
17 #include "HexagonRegisterInfo.h"
18 #include "HexagonTargetMachine.h"
19 #include "HexagonBitTracker.h"
23 typedef BitTracker BT;
HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
      MachineRegisterInfo &mri, const HexagonInstrInfo &tii,
      MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(*mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map back the
  // virtual register to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
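  //
  // Illustration (an assumed example, not from the original sources): for
  //   define i32 @f(i8 signext %a, i16 zeroext %b)
  // MRI pairs R0 with the vreg copied from %a and R1 with the vreg copied
  // from %b; the loop below would record {SExt, 8} for the first vreg and
  // {ZExt, 16} for the second.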
  unsigned InVirtReg, InPhysReg = 0;
  const Function &F = *MF.getFunction();
  typedef Function::const_arg_iterator arg_iterator;
  for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    const Argument &Arg = *I;
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    // Attribute index 0 refers to the return value, so the attribute index
    // of an argument is its argument number plus 1.
    unsigned AttrIdx = Arg.getArgNo() + 1;
    AttributeSet Attrs = F.getAttributes();
    if (Attrs.hasAttribute(AttrIdx, Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}
BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  using namespace Hexagon;
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  unsigned ID = RC->getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
  switch (ID) {
    case DoubleRegsRegClassID:
    case VecDblRegsRegClassID:
    case VecDblRegs128BRegClassID:
      return (Sub == subreg_loreg) ? BT::BitMask(0, RW-1)
                                   : BT::BitMask(RW, 2*RW-1);
    default:
      break;
  }
  dbgs() << PrintReg(Reg, &TRI, Sub) << '\n';
  llvm_unreachable("Unexpected register/subregister");
}
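// Note on the masks above (illustrative, assuming 64-bit double registers):
// for a pair such as D0 = r1:0, the low subregister occupies cell bits
// [0..RW-1] and the high subregister bits [RW..2*RW-1], which is exactly
// what BitMask(0, RW-1) and BitMask(RW, 2*RW-1) describe.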
namespace {
struct RegisterRefs {
  std::vector<BT::RegisterRef> Vector;

  RegisterRefs(const MachineInstr *MI) : Vector(MI->getNumOperands()) {
    for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg())
        Vector[i] = BT::RegisterRef(MO);
      // For indices that don't correspond to registers, the entry will
      // remain constructed via the default constructor.
    }
  }

  size_t size() const { return Vector.size(); }
  const BT::RegisterRef &operator[](unsigned n) const {
    // The main purpose of this operator is to assert with bad argument.
    assert(n < Vector.size());
    return Vector[n];
  }
};
}
bool HexagonEvaluator::evaluate(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  unsigned NumDefs = 0;

  // Sanity verification: there should not be any defs with subregisters.
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;

  if (MI->mayLoad())
    return evaluateLoad(MI, Inputs, Outputs);
  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
  if (MI->isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }
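  // For instance (an assumed example): if R0 arrives holding a zero-extended
  // 8-bit argument, then for "%vreg = COPY %R0" the top 24 bits of %vreg can
  // be recorded as known zeros, which evaluateFormalCopy does via the VRX
  // map populated in the constructor.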
  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }
  RegisterRefs Reg(MI);
  unsigned Opc = MI->getOpcode();
  using namespace Hexagon;
#define op(i) MI->getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
#define im(i) MI->getOperand(i).getImm()
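  // In the evaluation code below, op(i) is raw machine operand i, rc(i) is
  // the register cell (the vector of tracked bit values) currently known for
  // that operand, and im(i) is its immediate value. For example, for
  // "Rd = A2_addi Rs, #10", rc(1) is the cell computed so far for Rs and
  // im(2) is 10.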
  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record result for register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
  auto cop = [this,&Reg,&MI,&Inputs] (unsigned N, uint16_t W)
        -> BT::RegisterCell {
    const MachineOperand &Op = MI->getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from cells and merge into result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
                         uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };
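  // For example (illustrative): shuffle(Rs, Rt, 8, false) on 64-bit cells
  // produces { Rt.b0, Rs.b0, Rt.b2, Rs.b2, Rt.b4, Rs.b4, Rt.b6, Rs.b6 }
  // from the least significant byte up, i.e. the even bytes of both inputs
  // interleaved; Odd=true picks the odd bytes instead. This models the
  // S2_shuffeb/S2_shuffob family handled below.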
  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many cases
  // below.
  uint16_t W0 = (Reg[0].Reg != 0) ? getRegBitWidth(Reg[0]) : 0;

  switch (Opc) {
    // Transfer immediate:

    case A2_tfrsi:
    case A2_tfrpi:
    case CONST32:
    case CONST32_Float_Real:
    case CONST32_Int_Real:
    case CONST64_Float_Real:
    case CONST64_Int_Real:
      return rr0(eIMM(im(1), W0), Outputs);
    case TFR_PdFalse:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
    case TFR_PdTrue:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
    case TFR_FI: {
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      unsigned A = MFI.getObjectAlignment(FI) + std::abs(Off);
      unsigned L = Log2_32(A);
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      RC.fill(0, L, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    // Transfer register:

    case A2_tfr:
    case A2_tfrp:
    case C2_pxfer_map:
      return rr0(rc(1), Outputs);
    case C2_tfrpr: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(PW <= RW);
      RegisterCell PC = eXTR(rc(1), 0, PW);
      RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    case C2_tfrrp: {
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      W0 = 8; // XXX Pred size
      return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs);
    }

    // Arithmetic:

    case A2_addsp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
      RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
      return rr0(RC, Outputs);
    }
    case A2_add:
    case A2_addp:
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case A2_addi:
      return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
    case S4_addi_asl_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addi_lsr_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addaddi: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addi: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addi: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr_u2: {
      RegisterCell M = eMLS(eIMM(im(2), W0), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addr: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case S4_subaddi: {
      RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_accii: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_acci: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_subacc: {
      RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case S2_addasl_rrri: {
      RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case C4_addipc: {
      RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
      RPC.fill(0, 2, BT::BitValue::Zero);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
    }
    case A2_sub:
    case A2_subp:
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case A2_subri:
      return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_subi_lsr_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M2_naccii: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_nacci: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    // 32-bit negation is done by "Rd = A2_subri 0, Rs".
    case A2_negp:
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);
    case M2_mpy_up: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyss_s0:
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
      return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_dpmpyss_nac_s0:
      return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_mpyi: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(lo(M, W0), Outputs);
    }
    case M2_macsip: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_macsin: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eSUB(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_maci: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_mpysmi: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysin: {
      RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysip: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpyu_up: {
      RegisterCell M = eMLU(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyuu_s0:
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
      return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
    case M2_dpmpyuu_nac_s0:
      return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);
    // Logical/bitwise:

    case A2_andir:
      return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_and:
    case A2_andp:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case A4_andn:
    case A4_andnp:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case S4_andi_asl_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_andi_lsr_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_and_xor:
      return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_orir:
      return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_or:
    case A2_orp:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case A4_orn:
    case A4_ornp:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case S4_ori_asl_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_ori_lsr_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case S4_or_andi:
    case S4_or_andix: {
      RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case S4_or_ori: {
      RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_or_xor:
      return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_xor:
    case A2_xorp:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case M4_xor_and:
      return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_xor_andn:
      return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_xor_or:
      return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_xor_xacc:
      return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_not:
    case A2_notp:
      return rr0(eNOT(rc(1)), Outputs);
    case S2_asl_i_r:
    case S2_asl_i_p:
      return rr0(eASL(rc(1), im(2)), Outputs);
    case A2_aslh:
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_acc:
    case S2_asl_i_p_acc:
      return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_nac:
    case S2_asl_i_p_nac:
      return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_and:
    case S2_asl_i_p_and:
      return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_or:
    case S2_asl_i_p_or:
      return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asr_i_r:
    case S2_asr_i_p:
      return rr0(eASR(rc(1), im(2)), Outputs);
    case A2_asrh:
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_acc:
    case S2_asr_i_p_acc:
      return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_nac:
    case S2_asr_i_p_nac:
      return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_and:
    case S2_asr_i_p_and:
      return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_or:
    case S2_asr_i_p_or:
      return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_rnd: {
      // The input is first sign-extended to 64 bits, then the output
      // is truncated back to 32 bits.
      assert(W0 == 32);
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_r_rnd_goodsyntax: {
      int64_t S = im(2);
      if (S == 0)
        return rr0(rc(1), Outputs);
      // Result: S2_asr_i_r_rnd Rs, u5-1
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
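    // Rounding illustration for the two cases above (assumed numbers):
    // with S = 1, the goodsyntax form computes ((Rs >> 0) + 1) >> 1, so an
    // input of 7 yields 4, where a plain truncating shift by 1 would
    // produce 3.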
    case S2_asr_i_svw_trun:
      // TODO
      break;

    case S2_lsr_i_r:
    case S2_lsr_i_p:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_acc:
    case S2_lsr_i_p_acc:
      return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_nac:
    case S2_lsr_i_p_nac:
      return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_and:
    case S2_lsr_i_p_and:
      return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_or:
    case S2_lsr_i_p_or:
      return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);

    case S2_clrbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::Zero;
      return rr0(RC, Outputs);
    }
    case S2_setbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::One;
      return rr0(RC, Outputs);
    }
    case S2_togglebit_i: {
      RegisterCell RC = rc(1);
      uint16_t BX = im(2);
      RC[BX] = RC[BX].is(0) ? BT::BitValue::One
             : RC[BX].is(1) ? BT::BitValue::Zero
                            : BT::BitValue::self();
      return rr0(RC, Outputs);
    }

    case A4_bitspliti: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      uint16_t BX = im(2);
      // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
      const BT::BitValue Zero = BT::BitValue::Zero;
      RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                        .fill(W1+(W1-BX), W0, Zero);
      RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
      RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
      return rr0(RC, Outputs);
    }
    case S4_extract:
    case S4_extractp:
    case S2_extractu:
    case S2_extractup: {
      uint16_t Wd = im(2), Of = im(3);
      assert(Wd <= W0 && Of < W0);
      if (Wd == 0)
        return rr0(eIMM(0, W0), Outputs);
      // If the width extends beyond the register size, pad the register
      // with 0 bits.
      RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
      RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
      // Ext is short, need to extend it with 0s or sign bit.
      RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
    }
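    // Extract illustration (assumed values): extractu(Rs, #8, #4) takes the
    // eight bits Rs[11:4], places them at the bottom of the result, and the
    // eZXT above marks bits [8..31] as known zeros; the signed forms
    // (S4_extract*) instead replicate bit Rs[11] into the upper bits.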
    case S2_insert:
    case S2_insertp: {
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      // If Wd+Of exceeds W0, the inserted bits are truncated.
      if (Wd+Of > W0)
        Wd = W0-Of;
      if (Wd == 0)
        return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    }
    // Combine:

    case A2_combineii:
    case A4_combineii:
    case A4_combineir:
    case A4_combineri:
    case A2_combinew:
      assert(W0 % 2 == 0);
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
    case A2_combine_ll:
    case A2_combine_lh:
    case A2_combine_hl:
    case A2_combine_hh: {
      assert(W0 == 32);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      // Low half in the output is 0 for _ll and _hl, 1 otherwise:
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      // High half in the output is 0 for _ll and _lh, 1 otherwise:
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
      return rr0(RC, Outputs);
    }
    case S2_packhl: {
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                   .cat(half(R1, 1));
      return rr0(RC, Outputs);
    }
    case S2_shuffeb: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffeh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffob: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
      return rr0(RC, Outputs);
    }
    case S2_shuffoh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
      return rr0(RC, Outputs);
    }
    case C2_mask: {
      uint16_t WR = W0;
      uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(WR == 64 && WP == 8);
      RegisterCell R1 = rc(1);
      RegisterCell RC(WR);
      for (uint16_t i = 0; i < WP; ++i) {
        const BT::BitValue &V = R1[i];
        BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
        RC.fill(i*8, i*8+8, F);
      }
      return rr0(RC, Outputs);
    }
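    // C2_mask illustration (assumed values): each known predicate bit i
    // turns byte i of the 64-bit result into a known 0x00 or 0xff; e.g. if
    // the low bits of Pt are known to be 0101 (bit 0 = 1), bytes 0..3 of
    // the result become 0xff, 0x00, 0xff, 0x00.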
    // Mux:

    case C2_mux:
    case C2_muxir:
    case C2_muxri:
    case C2_muxii: {
      BT::BitValue PC0 = rc(1)[0];
      RegisterCell R2 = cop(2, W0);
      RegisterCell R3 = cop(3, W0);
      if (PC0.is(0) || PC0.is(1))
        return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
    }
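    // When the predicate bit is unknown, the meet() above keeps only the
    // bits on which both arms agree; e.g. mux(p0, #4, #6) with unknown p0
    // still has bit 0 known zero, since both 4 and 6 are even (an assumed
    // example).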
    // Sign- and zero-extension:

    case A2_sxtb:
      return rr0(eSXT(rc(1), 8), Outputs);
    case A2_sxth:
      return rr0(eSXT(rc(1), 16), Outputs);
    case A2_sxtw: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
      return rr0(RC, Outputs);
    }
    case A2_zxtb:
      return rr0(eZXT(rc(1), 8), Outputs);
    case A2_zxth:
      return rr0(eZXT(rc(1), 16), Outputs);

    // Bit count:

    case S2_cl0:
    case S2_cl0p:
      // Always produce a 32-bit result.
      return rr0(eCLB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_cl1:
    case S2_cl1p:
      return rr0(eCLB(rc(1), 1/*bit*/, 32), Outputs);
    case S2_clb:
    case S2_clbp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      RegisterCell R1 = rc(1);
      BT::BitValue TV = R1[W1-1];
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      break;
    }
    case S2_ct0:
    case S2_ct0p:
      return rr0(eCTB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_ct1:
    case S2_ct1p:
      return rr0(eCTB(rc(1), 1/*bit*/, 32), Outputs);
    case C2_all8: {
      RegisterCell P1 = rc(1);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(1))
          All1 = false;
        if (!P1[i].is(0))
          continue;
        Has0 = true;
        break;
      }
      if (!Has0 && !All1)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_any8: {
      RegisterCell P1 = rc(1);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(0))
          All0 = false;
        if (!P1[i].is(1))
          continue;
        Has1 = true;
        break;
      }
      if (!Has1 && !All0)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_and:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case C2_andn:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case C2_not:
      return rr0(eNOT(rc(1)), Outputs);
    case C2_or:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case C2_orn:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case C2_xor:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case C4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_and_orn:
      return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_or_orn:
      return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case S2_tstbit_i:
    case S4_ntstbit_i: {
      BT::BitValue V = rc(1)[im(2)];
      if (V.is(0) || V.is(1)) {
        // If instruction is S2_tstbit_i, test for 1, otherwise test for 0.
        bool TV = (Opc == S2_tstbit_i);
        BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
        return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
      }
      break;
    }

    default:
      break;
  }

  return MachineEvaluator::evaluate(MI, Inputs, Outputs);
}
bool HexagonEvaluator::evaluate(const MachineInstr *BI,
      const CellMapType &Inputs, BranchTargetList &Targets,
      bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::AnalyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
  unsigned Opc = BI->getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
      Negated = true;
      // Fall through to the common conditional-branch handling.
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
      // Simple branch: if([!]Pn) jump ...
      // i.e. Op0 = predicate, Op1 = branch target.
      SimpleBranch = true;
      break;
    case Hexagon::J2_jump:
      Targets.insert(BI->getOperand(0).getMBB());
      FallsThru = false;
      return true;
    default:
      // If the branch is of unknown type, assume that all successors are
      // executable.
      return false;
  }

  if (!SimpleBranch)
    return false;

  // BI is a conditional branch if we got here.
  RegisterRef PR = BI->getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // Condition known to be false.
    FallsThru = true;
    return true;
  }

  Targets.insert(BI->getOperand(1).getMBB());
  FallsThru = false;
  return true;
}
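// Net effect (an assumed scenario): if bit tracking proves the predicate of
// "if (P0) jump L" is always true, the branch is treated as unconditional
// (only L is a target and FallsThru is false), letting the client pass prune
// the dead fall-through path.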
bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  if (TII.isPredicated(MI))
    return false;
  assert(MI->mayLoad() && "A load that mayn't?");
  unsigned Opc = MI->getOpcode();
  uint16_t BitNum;
  bool SignEx;
  using namespace Hexagon;

  switch (Opc) {
    default:
      return false;

    // These loads shift in or pair up sub-units rather than simply
    // extending a single loaded value, so the extension scheme below
    // does not apply to them.
    // memb_fifo / memh_fifo
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    // membh
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    // memubh
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
      return false;

    // memb
    case L2_loadrb_io:
    case L2_loadrb_pbr:
    case L2_loadrb_pci:
    case L2_loadrb_pcr:
    case L2_loadrb_pi:
      BitNum = 8;
      SignEx = true;
      break;

    // memub
    case L2_loadrub_io:
    case L2_loadrub_pbr:
    case L2_loadrub_pci:
    case L2_loadrub_pcr:
    case L2_loadrub_pi:
    case L4_loadrub_abs:
      BitNum = 8;
      SignEx = false;
      break;

    // memh
    case L2_loadrh_io:
    case L2_loadrh_pbr:
    case L2_loadrh_pci:
    case L2_loadrh_pcr:
    case L2_loadrh_pi:
      BitNum = 16;
      SignEx = true;
      break;

    // memuh
    case L2_loadruh_io:
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L2_loadruh_pi:
    case L4_loadruh_abs:
      BitNum = 16;
      SignEx = false;
      break;

    // memw
    case L2_loadri_io:
    case L2_loadri_pbr:
    case L2_loadri_pci:
    case L2_loadri_pcr:
    case L2_loadri_pi:
    case L2_loadw_locked:
      BitNum = 32;
      SignEx = true;
      break;

    // memd
    case L2_loadrd_io:
    case L2_loadrd_pbr:
    case L2_loadrd_pci:
    case L2_loadrd_pcr:
    case L2_loadrd_pi:
    case L4_loadd_locked:
      BitNum = 64;
      SignEx = true;
      break;
  }

  const MachineOperand &MD = MI->getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  // The low BitNum bits are the (unknown) loaded value.
  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));

  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}
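// Shape of the cell built above (an assumed example): for Rd = memub(...),
// W = 32 and BitNum = 8, so bits [0..7] are "self" (the unknown loaded data)
// and bits [8..31] are known zeros; a memb load instead makes bits [8..31]
// references to the sign bit Res[7].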
bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI->isCopy());

  RegisterRef RD = MI->getOperand(0);
  RegisterRef RS = MI->getOperand(1);
  assert(RD.Sub == 0);
  if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a
  // virtual register, and make zero-/sign-extends possible (otherwise we
  // would be extending "self" bit values, which will have no effect, since
  // "self" values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}
unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;
  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];

  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next parameter register of the corresponding width.
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      Idx32++;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      Idx64++;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}
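// Walkthrough (an assumed example): the argument registers are r0..r5,
// pairing up as d0 = r1:0, d1 = r3:2, d2 = r5:4. If the previous argument
// was passed in r2, the next 32-bit argument goes in r3, while a 64-bit
// argument skips to d2 (r5:4), matching the index arithmetic above.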
unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  typedef MachineRegisterInfo::livein_iterator iterator;
  for (iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
    if (I->first == PReg)
      return I->second;
  }
  return 0;
}