//===- llvm/Analysis/MemoryProfileInfo.h - memory profile info ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYPROFILEINFO_H
#define LLVM_ANALYSIS_MEMORYPROFILEINFO_H

#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include <map>

namespace llvm {
namespace memprof {

/// Return the allocation type for a given set of memory profile values.
AllocationType getAllocType(uint64_t MaxAccessCount, uint64_t MinSize,
                            uint64_t MinLifetime);
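// Usage sketch (illustrative only; "Rec" and its fields are hypothetical
// placeholders for values aggregated from the raw memory profile for one
// allocation context):
//
//   AllocationType AT =
//       getAllocType(Rec.MaxAccessCount, Rec.MinSize, Rec.MinLifetime);
//
// The result is typically passed to CallStackTrie::addCallStack() below.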

/// Build callstack metadata from the provided list of call stack ids. Returns
/// the resulting metadata node.
MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack, LLVMContext &Ctx);

/// Returns the stack node from an MIB metadata node.
MDNode *getMIBStackNode(const MDNode *MIB);

/// Returns the allocation type from an MIB metadata node.
AllocationType getMIBAllocType(const MDNode *MIB);
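// Sketch of how these pieces fit together (the metadata layout below is
// illustrative of the shape this analysis produces, not a normative
// specification; the ids and the "cold" string are example values):
//
//   call ptr @malloc(i64 %n), !memprof !0
//   !0 = !{!1}                      ; list of MIB nodes, one per context
//   !1 = !{!2, !"cold"}             ; MIB: call stack node + allocation type
//   !2 = !{i64 123, i64 456}        ; stack ids, allocation frame outwards
//
//   MDNode *Stack = getMIBStackNode(MIB);      // the call stack node (!2)
//   AllocationType AT = getMIBAllocType(MIB);  // e.g. a cold allocation type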

/// Class to build a trie of call stack contexts for a particular profiled
/// allocation call, along with their associated allocation types.
/// The allocation will be at the root of the trie, which is then used to
/// compute the minimum lists of context ids needed to associate a call context
/// with a single allocation type.
class CallStackTrie {
private:
  struct CallStackTrieNode {
    // Allocation types for call contexts sharing the context prefix at this
    // node.
    uint8_t AllocTypes;
    // Map of caller stack id to the corresponding child Trie node.
    std::map<uint64_t, CallStackTrieNode *> Callers;
    CallStackTrieNode(AllocationType Type)
        : AllocTypes(static_cast<uint8_t>(Type)) {}
  };

  // The node for the allocation at the root.
  CallStackTrieNode *Alloc;
  // The allocation's leaf stack id.
  uint64_t AllocStackId;

  void deleteTrieNode(CallStackTrieNode *Node) {
    if (!Node)
      return;
    for (auto C : Node->Callers)
      deleteTrieNode(C.second);
    delete Node;
  }

  // Recursive helper to trim contexts and create metadata nodes.
  bool buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                     std::vector<uint64_t> &MIBCallStack,
                     std::vector<Metadata *> &MIBNodes,
                     bool CalleeHasAmbiguousCallerContext);

public:
  CallStackTrie() : Alloc(nullptr), AllocStackId(0) {}
  ~CallStackTrie() { deleteTrieNode(Alloc); }

  bool empty() const { return Alloc == nullptr; }

  /// Add a call stack context with the given allocation type to the Trie.
  /// The context is represented by the list of stack ids (computed during
  /// matching via a debug location hash), expected to be in order from the
  /// allocation call down to the bottom of the call stack (i.e. callee to
  /// caller order).
  void addCallStack(AllocationType AllocType, ArrayRef<uint64_t> StackIds);

  /// Add the call stack context along with its allocation type from the MIB
  /// metadata to the Trie.
  void addCallStack(MDNode *MIB);

  /// Build and attach the minimal necessary MIB metadata. If the alloc has a
  /// single allocation type, add a function attribute instead. An attribute is
  /// used in that case because it matches how the allocation behavior will be
  /// communicated to lib call simplification once cloning or another
  /// optimization has distinguished the allocation types, and it is lower
  /// overhead and more direct than maintaining this metadata.
  /// Returns true if memprof metadata was attached, false if not (attribute
  /// added instead).
  bool buildAndAttachMIBMetadata(CallBase *CI);
};
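// Usage sketch (illustrative only; the stack ids and AllocCall are
// hypothetical, and the AllocationType enumerators come from
// llvm/IR/ModuleSummaryIndex.h):
//
//   void attachMemProf(CallBase *AllocCall) {
//     CallStackTrie Trie;
//     // Two profiled contexts reaching the same allocation, callee to caller.
//     Trie.addCallStack(AllocationType::Cold, {1, 2, 3});
//     Trie.addCallStack(AllocationType::NotCold, {1, 2, 4});
//     // Attaches !memprof MIB metadata; if every context had the same
//     // allocation type it would add a function attribute instead.
//     Trie.buildAndAttachMIBMetadata(AllocCall);
//   }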

/// Helper class to iterate through stack ids in both metadata (memprof MIB and
/// callsite) and the corresponding ThinLTO summary data structures
/// (CallsiteInfo and MIBInfo). This simplifies implementation of client code,
/// which doesn't need to worry about whether we are operating with IR (Regular
/// LTO) or summary (ThinLTO).
template <class NodeT, class IteratorT> class CallStack {
public:
  CallStack(const NodeT *N = nullptr) : N(N) {}

  // Implement minimum required methods for range-based for loop.
  // The default implementation assumes we are operating on ThinLTO data
  // structures, which have a vector of StackIdIndices. There are specialized
  // versions provided to iterate through metadata.
  struct CallStackIterator {
    const NodeT *N = nullptr;
    IteratorT Iter;
    CallStackIterator(const NodeT *N, bool End);
    uint64_t operator*();
    bool operator==(const CallStackIterator &rhs) { return Iter == rhs.Iter; }
    bool operator!=(const CallStackIterator &rhs) { return !(*this == rhs); }
    void operator++() { ++Iter; }
  };

  bool empty() const { return N == nullptr; }

  CallStackIterator begin() const;
  CallStackIterator end() const { return CallStackIterator(N, /*End*/ true); }
  CallStackIterator beginAfterSharedPrefix(CallStack &Other);

private:
  const NodeT *N = nullptr;
};
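// Usage sketch with ThinLTO summary data (illustrative; assumes
// CallsiteInfo::StackIdIndices is a SmallVector<unsigned> as declared in
// llvm/IR/ModuleSummaryIndex.h):
//
//   void walkSummaryStack(const CallsiteInfo &CS) {
//     CallStack<CallsiteInfo, SmallVector<unsigned>::const_iterator>
//         StackContext(&CS);
//     for (uint64_t StackIdIndex : StackContext) {
//       // StackIdIndex indexes into the summary index's stack id table.
//     }
//   }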

template <class NodeT, class IteratorT>
CallStack<NodeT, IteratorT>::CallStackIterator::CallStackIterator(
    const NodeT *N, bool End)
    : N(N) {
  if (!N)
    return;
  Iter = End ? N->StackIdIndices.end() : N->StackIdIndices.begin();
}

template <class NodeT, class IteratorT>
uint64_t CallStack<NodeT, IteratorT>::CallStackIterator::operator*() {
  assert(Iter != N->StackIdIndices.end());
  return *Iter;
}

template <class NodeT, class IteratorT>
typename CallStack<NodeT, IteratorT>::CallStackIterator
CallStack<NodeT, IteratorT>::begin() const {
  return CallStackIterator(N, /*End*/ false);
}

template <class NodeT, class IteratorT>
typename CallStack<NodeT, IteratorT>::CallStackIterator
CallStack<NodeT, IteratorT>::beginAfterSharedPrefix(CallStack &Other) {
  CallStackIterator Cur = begin();
  for (CallStackIterator OtherCur = Other.begin();
       Cur != end() && OtherCur != Other.end(); ++Cur, ++OtherCur)
    assert(*Cur == *OtherCur);
  return Cur;
}
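// For example, if Other iterates stack ids {1, 2} and this CallStack iterates
// {1, 2, 3, 4}, beginAfterSharedPrefix(Other) returns an iterator positioned
// at 3, i.e. just past the prefix shared with Other; the shared prefix is
// asserted to match element by element.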

/// Specializations for iterating through IR metadata stack contexts.
template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End);
template <>
uint64_t CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*();
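// Usage sketch with IR metadata (illustrative; MemProfMD stands for the
// !memprof metadata node already retrieved from an allocation call, with one
// MIB node per operand):
//
//   for (const MDOperand &MIBOp : MemProfMD->operands()) {
//     MDNode *MIB = dyn_cast<MDNode>(MIBOp);
//     CallStack<MDNode, MDNode::op_iterator> StackContext(getMIBStackNode(MIB));
//     for (uint64_t StackId : StackContext) {
//       // StackId is the per-frame debug location hash, callee to caller.
//     }
//   }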

} // end namespace memprof
} // end namespace llvm

#endif