//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is used as the common base
/// class for instruction schedulers. This encapsulates the scheduling DAG,
/// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
#define LLVM_CODEGEN_SCHEDULEDAG_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>

namespace llvm {

template <class GraphType> struct GraphTraits;
template<class Graph> class GraphWriter;
class LLVMTargetMachine;
class MachineFunction;
class MachineRegisterInfo;
class MCInstrDesc;
struct MCSchedClassDesc;
class SDNode;
class SUnit;
class ScheduleDAG;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

  /// Scheduling dependency. This represents one direction of an edge in the
  /// scheduling DAG.
  class SDep {
  public:
    /// These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only if
    // the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/Store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/Store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };

  private:
    /// A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// A union discriminated by the dependence kind.
    union {
      /// For Data, Anti, and Output dependencies, the associated register. For
      /// Data dependencies that don't currently have a register assigned, this
      /// is set to zero.
      unsigned Reg;

      /// Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// The time associated with this edge. Often this is just the value of the
    /// Latency field of the predecessor; however, advanced models may provide
    /// additional information about specific edges.
    unsigned Latency;

  public:
    /// Constructs a null SDep. This is only for use by container classes which
    /// require default constructors. SUnits may not have null SDep edges.
    SDep() : Dep(nullptr, Data) {}

    /// Constructs an SDep with the specified values.
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }

    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }

    /// Returns true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const;

    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// Returns the latency value for this edge, which roughly means the
    /// minimum number of cycles that must elapse between the predecessor and
    /// the successor, given that they have this edge between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// Sets the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// Returns the SUnit to which this edge points.
    SUnit *getSUnit() const;

    /// Assigns the SUnit to which this edge points.
    void setSUnit(SUnit *SU);

    /// Returns an enum value representing the kind of the dependence.
    Kind getKind() const;

    /// Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// Tests if this is an Order dependence between two memory accesses
    /// where both sides of the dependence access memory in non-volatile and
    /// fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// Tests if this is an Order dependence that is marked as a barrier.
    bool isBarrier() const {
      return getKind() == Order && Contents.OrdKind == Barrier;
    }

    /// Tests if this could be any kind of memory dependence.
    bool isNormalMemoryOrBarrier() const {
      return (isNormalMemory() || isBarrier());
    }

    /// Tests if this is an Order dependence that is marked as
    /// "must alias", meaning that the SUnits at either end of the edge have a
    /// memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }

    /// Tests if this is a weak dependence. Weak dependencies are considered DAG
    /// edges for height computation and other heuristics, but do not force
    /// ordering. Breaking a weak edge may require the scheduler to compensate,
    /// for example by inserting a copy.
    bool isWeak() const {
      return getKind() == Order && Contents.OrdKind >= Weak;
    }

    /// Tests if this is an Order dependence that is marked as
    /// "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// Tests if this is an Order dependence that is marked as "cluster",
    /// meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// Tests if this is a Data dependence that is associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// Returns the register associated with this edge. This is only valid on
    /// Data, Anti, and Output edges. On Data edges, this value may be zero,
    /// meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// Assigns the associated register for this edge. This is only valid on
    /// Data, Anti, and Output edges. On Anti and Output edges, this value must
    /// not be zero. On Data edges, the value may be zero, which would mean that
    /// no specific register is associated with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }

    void dump(const TargetRegisterInfo *TRI = nullptr) const;
  };
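
  // Illustrative example (not part of the original header, just a sketch of
  // typical SDep usage): constructing edges and querying their kind. `SU` is
  // assumed to point at a valid SUnit somewhere in a scheduling DAG.
  //
  //   SDep RegDep(SU, SDep::Data, /*Reg=*/0); // data edge, default latency 1
  //   SDep MemDep(SU, SDep::MayAliasMem);     // order edge, latency 0
  //   assert(RegDep.getKind() == SDep::Data && RegDep.getLatency() == 1);
  //   assert(MemDep.isNormalMemory() && MemDep.isCtrl());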

  /// Scheduling unit. This is a node in the scheduling DAG.
  class SUnit {
  private:
    enum : unsigned { BoundaryID = ~0u };

    SDNode *Node = nullptr;        ///< Representative node.
    MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.

  public:
    SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
                               /// was cloned. (SD scheduling only)

    const MCSchedClassDesc *SchedClass =
        nullptr; ///< nullptr or resolved SchedClass.

    SmallVector<SDep, 4> Preds;  ///< All sunit predecessors.
    SmallVector<SDep, 4> Succs;  ///< All sunit successors.

    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

    unsigned NodeNum = BoundaryID;     ///< Entry # of node in the node vector.
    unsigned NodeQueueId = 0;          ///< Queue id of node.
    unsigned NumPreds = 0;             ///< # of SDep::Data preds.
    unsigned NumSuccs = 0;             ///< # of SDep::Data succs.
    unsigned NumPredsLeft = 0;         ///< # of preds not scheduled.
    unsigned NumSuccsLeft = 0;         ///< # of succs not scheduled.
    unsigned WeakPredsLeft = 0;        ///< # of weak preds not scheduled.
    unsigned WeakSuccsLeft = 0;        ///< # of weak succs not scheduled.
    unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
    unsigned short Latency = 0;        ///< Node latency.
    bool isVRegCycle      : 1;         ///< May use and def the same vreg.
    bool isCall           : 1;         ///< Is a function call.
    bool isCallOp         : 1;         ///< Is a function call operand.
    bool isTwoAddress     : 1;         ///< Is a two-address instruction.
    bool isCommutable     : 1;         ///< Is a commutable instruction.
    bool hasPhysRegUses   : 1;         ///< Has physreg uses.
    bool hasPhysRegDefs   : 1;         ///< Has physreg defs that are being used.
    bool hasPhysRegClobbers : 1;       ///< Has any physreg defs, used or not.
    bool isPending        : 1;         ///< True once pending.
    bool isAvailable      : 1;         ///< True once available.
    bool isScheduled      : 1;         ///< True once scheduled.
    bool isScheduleHigh   : 1;         ///< True if preferable to schedule high.
    bool isScheduleLow    : 1;         ///< True if preferable to schedule low.
    bool isCloned         : 1;         ///< True if this node has been cloned.
    bool isUnbuffered     : 1;         ///< Uses an unbuffered resource.
    bool hasReservedResource : 1;      ///< Uses a reserved resource.
    Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.

  private:
    bool isDepthCurrent   : 1;         ///< True if Depth is current.
    bool isHeightCurrent  : 1;         ///< True if Height is current.
    unsigned Depth = 0;                ///< Node depth.
    unsigned Height = 0;               ///< Node height.

  public:
    unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
    unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.

    const TargetRegisterClass *CopyDstRC =
        nullptr; ///< Is a special copy node if != nullptr.
    const TargetRegisterClass *CopySrcRC = nullptr;

    /// Constructs an SUnit for pre-regalloc scheduling to represent an
    /// SDNode and any nodes flagged to it.
    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

    /// Constructs an SUnit for post-regalloc scheduling to represent a
    /// MachineInstr.
    SUnit(MachineInstr *instr, unsigned nodenum)
      : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

    /// Constructs a placeholder SUnit.
    SUnit()
      : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
        isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
        hasPhysRegClobbers(false), isPending(false), isAvailable(false),
        isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
        isCloned(false), isUnbuffered(false), hasReservedResource(false),
        isDepthCurrent(false), isHeightCurrent(false) {}

    /// Boundary nodes are placeholders for the boundary of the
    /// scheduling region.
    ///
    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
    /// correspond to schedulable entities (e.g. instructions) and do not have a
    /// valid ID. Consequently, always check for boundary nodes before accessing
    /// an associative data structure keyed on node ID.
    bool isBoundaryNode() const { return NodeNum == BoundaryID; }

    /// Assigns the representative SDNode for this SUnit. This may be used
    /// during pre-regalloc scheduling.
    void setNode(SDNode *N) {
      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
      Node = N;
    }

    /// Returns the representative SDNode for this SUnit. This may be used
    /// during pre-regalloc scheduling.
    SDNode *getNode() const {
      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
      return Node;
    }

    /// Returns true if this SUnit refers to a machine instruction as
    /// opposed to an SDNode.
    bool isInstr() const { return Instr; }

    /// Assigns the instruction for the SUnit. This may be used during
    /// post-regalloc scheduling.
    void setInstr(MachineInstr *MI) {
      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
      Instr = MI;
    }

    /// Returns the representative MachineInstr for this SUnit. This may be used
    /// during post-regalloc scheduling.
    MachineInstr *getInstr() const {
      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
      return Instr;
    }

    /// Adds the specified edge as a pred of the current node if not already.
    /// It also adds the current node as a successor of the specified node.
    bool addPred(const SDep &D, bool Required = true);

    /// Adds a barrier edge to SU by calling addPred(), with latency 0
    /// generally or latency 1 for a store followed by a load.
    bool addPredBarrier(SUnit *SU) {
      SDep Dep(SU, SDep::Barrier);
      unsigned TrueMemOrderLatency =
        ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
      Dep.setLatency(TrueMemOrderLatency);
      return addPred(Dep);
    }

    /// Removes the specified edge as a pred of the current node if it exists.
    /// It also removes the current node as a successor of the specified node.
    void removePred(const SDep &D);

    /// Returns the depth of this node, which is the length of the maximum path
    /// up to any node which has no predecessors.
    unsigned getDepth() const {
      if (!isDepthCurrent)
        const_cast<SUnit *>(this)->ComputeDepth();
      return Depth;
    }

    /// Returns the height of this node, which is the length of the
    /// maximum path down to any node which has no successors.
    unsigned getHeight() const {
      if (!isHeightCurrent)
        const_cast<SUnit *>(this)->ComputeHeight();
      return Height;
    }

    /// If NewDepth is greater than this node's depth value, sets it to
    /// be the new depth value. This also recursively marks successor nodes
    /// dirty.
    void setDepthToAtLeast(unsigned NewDepth);

    /// If NewHeight is greater than this node's height value, set it to be
    /// the new height value. This also recursively marks predecessor nodes
    /// dirty.
    void setHeightToAtLeast(unsigned NewHeight);

    /// Sets a flag in this node to indicate that its stored Depth value
    /// will require recomputation the next time getDepth() is called.
    void setDepthDirty();

    /// Sets a flag in this node to indicate that its stored Height value
    /// will require recomputation the next time getHeight() is called.
    void setHeightDirty();

    /// Tests if node N is a predecessor of this node.
    bool isPred(const SUnit *N) const {
      for (const SDep &Pred : Preds)
        if (Pred.getSUnit() == N)
          return true;
      return false;
    }

    /// Tests if node N is a successor of this node.
    bool isSucc(const SUnit *N) const {
      for (const SDep &Succ : Succs)
        if (Succ.getSUnit() == N)
          return true;
      return false;
    }

    bool isTopReady() const {
      return NumPredsLeft == 0;
    }
    bool isBottomReady() const {
      return NumSuccsLeft == 0;
    }

    /// Orders this node's predecessor edges such that the critical path
    /// edge occurs first.
    void biasCriticalPath();

    void dumpAttributes() const;

  private:
    void ComputeDepth();
    void ComputeHeight();
  };
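
  // Illustrative example (not part of the original header): wiring two SUnits
  // together with addPred() and observing the derived state. `DefMI` and
  // `UseMI` are hypothetical MachineInstr pointers; real SUnits are normally
  // created by a ScheduleDAG implementation rather than by hand.
  //
  //   SUnit Def(DefMI, /*nodenum=*/0);
  //   SUnit Use(UseMI, /*nodenum=*/1);
  //   SDep Edge(&Def, SDep::Data, /*Reg=*/0);
  //   Edge.setLatency(2);
  //   Use.addPred(Edge); // also records Use as a successor of Def
  //   assert(Use.isPred(&Def) && Def.isSucc(&Use));
  //   assert(Def.isTopReady() && !Use.isTopReady());
  //   // Use.getDepth() now reflects Def's depth (0) plus the edge latency (2).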

  /// Returns true if the specified SDep is equivalent except for latency.
  inline bool SDep::overlaps(const SDep &Other) const {
    if (Dep != Other.Dep)
      return false;
    switch (Dep.getInt()) {
    case Data:
    case Anti:
    case Output:
      return Contents.Reg == Other.Contents.Reg;
    case Order:
      return Contents.OrdKind == Other.Contents.OrdKind;
    }
    llvm_unreachable("Invalid dependency kind!");
  }

  /// Returns the SUnit to which this edge points.
  inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }

  /// Assigns the SUnit to which this edge points.
  inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }

  /// Returns an enum value representing the kind of the dependence.
  inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }

  //===--------------------------------------------------------------------===//

  /// This interface is used to plug different priority computation
  /// algorithms into the list scheduler. It implements the interface of a
  /// standard priority queue, where nodes are inserted in arbitrary order and
  /// returned in priority order.  The computation of the priority and the
  /// representation of the queue are totally up to the implementation to
  /// decide.
  class SchedulingPriorityQueue {
    virtual void anchor();

    unsigned CurCycle = 0;
    bool HasReadyFilter;

  public:
    SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}

    virtual ~SchedulingPriorityQueue() = default;

    virtual bool isBottomUp() const = 0;

    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
    virtual void addNode(const SUnit *SU) = 0;
    virtual void updateNode(const SUnit *SU) = 0;
    virtual void releaseState() = 0;

    virtual bool empty() const = 0;

    bool hasReadyFilter() const { return HasReadyFilter; }

    virtual bool tracksRegPressure() const { return false; }

    virtual bool isReady(SUnit *) const {
      assert(!HasReadyFilter && "The ready filter must override isReady()");
      return true;
    }

    virtual void push(SUnit *U) = 0;

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (SUnit *SU : Nodes)
        push(SU);
    }

    virtual SUnit *pop() = 0;

    virtual void remove(SUnit *SU) = 0;

    virtual void dump(ScheduleDAG *) const {}

    /// As each node is scheduled, this method is invoked.  This allows the
    /// priority function to adjust the priority of related unscheduled nodes,
    /// for example.
    virtual void scheduledNode(SUnit *) {}

    virtual void unscheduledNode(SUnit *) {}

    void setCurCycle(unsigned Cycle) {
      CurCycle = Cycle;
    }

    unsigned getCurCycle() const {
      return CurCycle;
    }
  };
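
  // Illustrative sketch (not part of the original header): the smallest useful
  // SchedulingPriorityQueue overrides only the pure virtual members. This
  // hypothetical FIFO ignores priorities entirely; a real implementation would
  // order SUnits with a priority function. std::find requires <algorithm>.
  //
  //   struct FIFOQueue : public SchedulingPriorityQueue {
  //     std::vector<SUnit *> Queue;
  //
  //     bool isBottomUp() const override { return false; }
  //     void initNodes(std::vector<SUnit> &) override {}
  //     void addNode(const SUnit *) override {}
  //     void updateNode(const SUnit *) override {}
  //     void releaseState() override { Queue.clear(); }
  //     bool empty() const override { return Queue.empty(); }
  //     void push(SUnit *SU) override { Queue.push_back(SU); }
  //     SUnit *pop() override { // callers are expected to check empty() first
  //       SUnit *SU = Queue.front();
  //       Queue.erase(Queue.begin());
  //       return SU;
  //     }
  //     void remove(SUnit *SU) override {
  //       Queue.erase(std::find(Queue.begin(), Queue.end(), SU));
  //     }
  //   };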

  class ScheduleDAG {
  public:
    const LLVMTargetMachine &TM;        ///< Target processor
    const TargetInstrInfo *TII;         ///< Target instruction information
    const TargetRegisterInfo *TRI;      ///< Target processor register info
    MachineFunction &MF;                ///< Machine function
    MachineRegisterInfo &MRI;           ///< Virtual/real register map
    std::vector<SUnit> SUnits;          ///< The scheduling units.
    SUnit EntrySU;                      ///< Special node for the region entry.
    SUnit ExitSU;                       ///< Special node for the region exit.

#ifdef NDEBUG
    static const bool StressSched = false;
#else
    bool StressSched;
#endif

    explicit ScheduleDAG(MachineFunction &mf);

    virtual ~ScheduleDAG();

    /// Clears the DAG state (between regions).
    void clearDAG();

    /// Returns the MCInstrDesc of this SUnit.
    /// Returns NULL for SDNodes without a machine opcode.
    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
      if (SU->isInstr()) return &SU->getInstr()->getDesc();
      return getNodeDesc(SU->getNode());
    }

    /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
    virtual void viewGraph(const Twine &Name, const Twine &Title);
    virtual void viewGraph();

    virtual void dumpNode(const SUnit &SU) const = 0;
    virtual void dump() const = 0;
    void dumpNodeName(const SUnit &SU) const;

    /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

    /// Returns a label for the region of code covered by the DAG.
    virtual std::string getDAGName() const = 0;

    /// Adds custom features for a visualization of the ScheduleDAG.
    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
    /// Verifies that all SUnits were scheduled and that their state is
    /// consistent. Returns the number of scheduled SUnits.
    unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

  protected:
    void dumpNodeAll(const SUnit &SU) const;

  private:
    /// Returns the MCInstrDesc of this SDNode or NULL.
    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
  };
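
  // Illustrative sketch (not part of the original header): a concrete
  // scheduler (for example one derived from ScheduleDAGInstrs) can be
  // inspected through this common base. `DAG` and `NumCallNodes` are
  // hypothetical names.
  //
  //   for (const SUnit &SU : DAG.SUnits)
  //     if (const MCInstrDesc *Desc = DAG.getInstrDesc(&SU))
  //       if (Desc->isCall())
  //         ++NumCallNodes; // count call instructions in the region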

  class SUnitIterator {
    SUnit *Node;
    unsigned Operand;

    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SUnit;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    bool operator==(const SUnitIterator& x) const {
      return Operand == x.Operand;
    }
    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

    pointer operator*() const {
      return Node->Preds[Operand].getSUnit();
    }
    pointer operator->() const { return operator*(); }

    SUnitIterator& operator++() {                // Preincrement
      ++Operand;
      return *this;
    }
    SUnitIterator operator++(int) { // Postincrement
      SUnitIterator tmp = *this; ++*this; return tmp;
    }

    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
    static SUnitIterator end  (SUnit *N) {
      return SUnitIterator(N, (unsigned)N->Preds.size());
    }

    unsigned getOperand() const { return Operand; }
    const SUnit *getNode() const { return Node; }

    /// Tests if this is not an SDep::Data dependence.
    bool isCtrlDep() const {
      return getSDep().isCtrl();
    }
    bool isArtificialDep() const {
      return getSDep().isArtificial();
    }
    const SDep &getSDep() const {
      return Node->Preds[Operand];
    }
  };
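
  // Illustrative sketch (not part of the original header): SUnitIterator walks
  // an SUnit's predecessor edges, so "child" iteration visits the nodes this
  // node depends on. `SU` is a hypothetical pointer into a built DAG.
  //
  //   unsigned NumDataPreds = 0;
  //   for (SUnitIterator I = SUnitIterator::begin(SU),
  //                      E = SUnitIterator::end(SU); I != E; ++I)
  //     if (!I.isCtrlDep()) // an SDep::Data edge
  //       ++NumDataPreds;
  //   assert(NumDataPreds == SU->NumPreds); // NumPreds counts SDep::Data preds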

  template <> struct GraphTraits<SUnit*> {
    typedef SUnit *NodeRef;
    typedef SUnitIterator ChildIteratorType;
    static NodeRef getEntryNode(SUnit *N) { return N; }
    static ChildIteratorType child_begin(NodeRef N) {
      return SUnitIterator::begin(N);
    }
    static ChildIteratorType child_end(NodeRef N) {
      return SUnitIterator::end(N);
    }
  };

  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.begin());
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.end());
    }
  };
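
  // Illustrative sketch (not part of the original header): these GraphTraits
  // specializations are what viewGraph()/GraphWriter rely on, and they also
  // enable the generic graph utilities. For example, assuming
  // llvm/ADT/DepthFirstIterator.h is included and `Root` is a hypothetical
  // SUnit pointer, a depth-first walk over everything Root depends on is:
  //
  //   unsigned NumReachable = 0;
  //   for (SUnit *SU : depth_first(Root)) {
  //     (void)SU;
  //     ++NumReachable; // Root plus everything it transitively depends on
  //   }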

  /// This class can compute a topological ordering for SUnits and provides
  /// methods for dynamically updating the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  class ScheduleDAGTopologicalSort {
    /// A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    SUnit *ExitSU;

    // Have any new nodes been added?
    bool Dirty = false;

    // Outstanding added edges that have not been applied to the ordering.
    SmallVector<std::pair<SUnit *, SUnit *>, 16> Updates;

    /// Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// A set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// Makes a DFS traversal and marks all nodes affected by the edge insertion.
    /// These nodes will later get new topological indexes by means of the Shift
    /// method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// Reassigns topological indexes for the nodes in the DAG to
    /// preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Assigns the topological index to the node n.
    void Allocate(int n, int index);

    /// Fix the ordering, by either recomputing from scratch or by applying
    /// any outstanding updates. Uses a heuristic to estimate what will be
    /// cheaper.
    void FixOrder();

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// Add a SUnit without predecessors to the end of the topological order. It
    /// also must be the first new node added to the DAG.
    void AddSUnitWithoutPredecessors(const SUnit *SU);

    /// Creates the initial topological ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// Returns an array of SUs that are both in the successor
    /// subtree of StartSU and in the predecessor subtree of TargetSU.
    /// StartSU and TargetSU are not in the array.
    /// Success is false if TargetSU is not in the successor subtree of
    /// StartSU, else it is true.
    std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
                                 bool &Success);

    /// Checks if \p SU is reachable from \p TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// Returns true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// Updates the topological ordering to accommodate an edge to be
    /// added from SUnit \p X to SUnit \p Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// Queues an update to the topological ordering to accommodate an edge to
    /// be added from SUnit \p X to SUnit \p Y.
    void AddPredQueued(SUnit *Y, SUnit *X);

    /// Updates the topological ordering to accommodate an edge from the
    /// specified node \p N being removed from the predecessors of the
    /// current node \p M.
    void RemovePred(SUnit *M, SUnit *N);

    /// Mark the ordering as temporarily broken, after a new node has been
    /// added.
    void MarkDirty() { Dirty = true; }

    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
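
  // Illustrative sketch (not part of the original header): a typical client
  // builds the ordering once, then consults it before mutating the DAG so that
  // cycle-creating edges are rejected cheaply. `DAG`, `PredSU` and `SuccSU`
  // are hypothetical; the intended new edge runs from PredSU to SuccSU.
  //
  //   ScheduleDAGTopologicalSort Topo(DAG.SUnits, &DAG.ExitSU);
  //   Topo.InitDAGTopologicalSorting();
  //   if (!Topo.WillCreateCycle(SuccSU, PredSU)) {
  //     SuccSU->addPred(SDep(PredSU, SDep::Artificial));
  //     Topo.AddPredQueued(SuccSU, PredSU); // keep the ordering in sync
  //   }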

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEDAG_H