path: root/runtime/gc/allocation_record.cc
/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/logging.h"  // For VLOG
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"
#include "thread-inl.h"  // For GetWeakRefAccessEnabled().

#include <android-base/properties.h>

namespace art HIDDEN {
namespace gc {

int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  int32_t line_number = method_->GetLineNumFromDexPC(dex_pc_);
  if (line_number == -1 && !method_->IsProxyMethod()) {
    // If we failed to map the dex pc to a line number, then most probably there is no debug info.
    // Make the line_number same as the dex pc - it can be decoded later using a map file.
    // See b/30183883 and b/228000954.
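    // (Illustrative example: a frame in a method without a line table at dex pc 42
    // will report "line" 42 here, to be decoded later against the map file.)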
    line_number = static_cast<int32_t>(dex_pc_);
  }
  return line_number;
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

void AllocRecordObjectMap::SetMaxStackDepth(size_t max_stack_depth) {
  // Log fatal since this should already be checked when calling VMDebug.setAllocTrackerStackDepth.
  CHECK_LE(max_stack_depth, kMaxSupportedStackDepth)
      << "Allocation record max stack depth is too large";
  max_stack_depth_ = max_stack_depth;
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  // When we are compacting in userfaultfd GC, the class GC-roots are already
  // updated in SweepAllocationRecords()->SweepClassObject().
  if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
    return;
  }
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ allocation records in entries_ and mark their
  // klass_ fields as strong roots.
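  // Note that iteration is in reverse order (rbegin), so the first `count` entries
  // visited here are the most recently inserted records.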
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      // We can't use AsClass() as it uses IsClass in a DCHECK, which expects
      // the class' contents to be there. This is not the case in userfaultfd
      // GC.
      klass = GcRoot<mirror::Class>(ObjPtr<mirror::Class>::DownCast(new_object));
    }
  }
}

void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records can be deleted; the most recent
  // recent_record_max_ records are kept even if their objects are unmarked.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
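  // Illustrative arithmetic (numbers are hypothetical): with 10000 entries and
  // recent_record_max_ == 2000, delete_bound == 8000, so only the oldest 8000 records
  // are eligible for erasure below. The std::max() keeps the subtraction from
  // underflowing when entries_.size() < recent_record_max_.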
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!gUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!gUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
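      // Illustrative arithmetic (sizes are build-dependent): if
      // sizeof(AllocRecordStackTraceElement) were 16 bytes and max_stack_depth_ were 16,
      // sz would be 16 * 16 = 256 bytes plus the two fixed-size structs; the log below
      // scales this per-record cost by alloc_record_max_.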
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock-order violations,
    // e.g. with the runtime shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  {
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);

    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
          if (trace.GetDepth() >= max_stack_depth_) {
            return false;
          }
          ArtMethod* m = stack_visitor->GetMethod();
          // m may be null if we have inlined methods of unresolved classes. b/27858645
          if (m != nullptr && !m->IsRuntimeMethod()) {
            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // TODO: Skip recording allocations associated with DDMS. This was a feature of the old
  // debugger, but when we switched to the JVMTI-based debugger the feature was
  // (unintentionally) broken. Since nobody seemed to notice or care, it might not be worth
  // the trouble.

  // Wait for GC's sweeping to complete and allow new records.
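  // With a non-read-barrier collector this is gated by allow_new_record_ (toggled by
  // Allow/DisallowNewAllocationRecords above); with a read-barrier (CC) collector it is
  // gated by the thread-local weak-ref-access flag instead.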
  while (UNLIKELY((!gUseReadBarrier && !allow_new_record_) ||
                  (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking was disabled while we were waiting for weak ref
    // access above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Record the allocating thread's id in the trace.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art