/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_LIST_H_
#define ART_RUNTIME_THREAD_LIST_H_

#include "barrier.h"
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/value_object.h"
#include "jni.h"
#include "reflective_handle_scope.h"
#include "suspend_reason.h"

#include <bitset>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace collector {
class GarbageCollector;
}  // namespace collector
class GcPauseListener;
}  // namespace gc
class Closure;
class IsMarkedVisitor;
class RootVisitor;
class Thread;
class TimingLogger;
enum VisitRootFlags : uint8_t;

class ThreadList {
 public:
  static constexpr uint32_t kMaxThreadId = 0xFFFF;
  static constexpr uint32_t kInvalidThreadId = 0;
  static constexpr uint32_t kMainThreadId = 1;
  // We fail more aggressively in debug builds to catch potential issues early.
  static constexpr uint64_t kDefaultThreadSuspendTimeout =
      kIsDebugBuild ? 2'000'000'000ull : 4'000'000'000ull;
  // The number of times we may retry when we find ourselves in a suspend-unfriendly state.
  static constexpr int kMaxSuspendRetries = kIsDebugBuild ? 500 : 5000;
  static constexpr useconds_t kThreadSuspendSleepUs = 100;

  explicit ThreadList(uint64_t thread_suspend_timeout_ns);
  ~ThreadList();

  void ShutDown();

  void DumpForSigQuit(std::ostream& os)
      REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
  // For thread suspend timeout dumps.
  void Dump(std::ostream& os, bool dump_native_stack = true)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
  pid_t GetLockOwner();  // For SignalCatcher.

  // Thread suspension support.
  void ResumeAll()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);
  bool Resume(Thread* thread, SuspendReason reason = SuspendReason::kInternal)
      REQUIRES(!Locks::thread_suspend_count_lock_) WARN_UNUSED;

  // Suspends all other threads and gets exclusive access to the mutator lock.
  // If long_suspend is true, then other threads that try to suspend will never time out.
  // long_suspend is currently used for hprof, since dumping large heaps takes a long time.
  void SuspendAll(const char* cause, bool long_suspend = false)
      EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_,
               !Locks::mutator_lock_);
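
  // An illustrative SuspendAll()/ResumeAll() pairing, as a sketch; the ScopedSuspendAll helper
  // below is the preferred way to write this:
  //
  //   ThreadList* thread_list = Runtime::Current()->GetThreadList();
  //   thread_list->SuspendAll("heap dump");  // Blocks until all other threads are suspended.
  //   ...                                    // The exclusive mutator lock is held here.
  //   thread_list->ResumeAll();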

  // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
  // else null. The peer is used to identify the thread to avoid races with the thread terminating.
  Thread* SuspendThreadByPeer(jobject peer, SuspendReason reason)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
  // thread on success, else null. The thread id is used to identify the thread to avoid races
  // with the thread terminating. Note that since thread ids are recycled, this may not suspend
  // the expected thread, which may be terminating.
  Thread* SuspendThreadByThreadId(uint32_t thread_id, SuspendReason reason)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);
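
  // An illustrative suspend/resume pairing by thread id, as a sketch; it assumes the caller is
  // attached and holds none of the excluded locks:
  //
  //   Thread* target = SuspendThreadByThreadId(tid, SuspendReason::kInternal);
  //   if (target != nullptr) {  // Null if the thread exited, or on timeout.
  //     ...                     // target is suspended; inspect it here.
  //     bool resumed = Resume(target, SuspendReason::kInternal);
  //     DCHECK(resumed);
  //   }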

  // Find an existing thread (or self) by its thread id (not tid).
  Thread* FindThreadByThreadId(uint32_t thread_id) REQUIRES(Locks::thread_list_lock_);

  // Find an existing thread (or self) by its tid (not thread id).
  Thread* FindThreadByTid(int tid) REQUIRES(Locks::thread_list_lock_);

  // Does the thread list still contain the given thread, or one at the same address?
  // Used by Monitor to provide (mostly accurate) debugging information.
  bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);

  // Run a checkpoint on all threads. Return the total number of threads for which the checkpoint
  // function has been or will be called.
  // Running threads are not suspended but run the checkpoint inside of the suspend check. The
  // return value includes already suspended threads for b/24191051. Runs or requests the
  // callback, if non-null, inside the thread_list_lock critical section after determining the
  // runnable/suspended states of the threads. Does not wait for completion of the checkpoint
  // function in running threads. If the caller holds the mutator lock, then all instances of the
  // checkpoint function are run with the mutator lock. If the caller does not hold the mutator
  // lock (see mutator_gc_coord.md) then, since the checkpoint code may not acquire or release the
  // mutator lock, the checkpoint will have no way to access Java data.
  // TODO: Is it possible to just require the mutator lock here?
  size_t RunCheckpoint(Closure* checkpoint_function,
                       Closure* callback = nullptr,
                       bool allow_lock_checking = true)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  // Convenience version of the above that disables lock checking inside the Run function.
  // Hopefully this and the third parameter above will eventually disappear.
  size_t RunCheckpointUnchecked(Closure* checkpoint_function, Closure* callback = nullptr)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
    return RunCheckpoint(checkpoint_function, callback, false);
  }
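
  // A minimal checkpoint sketch, assuming Closure exposes a virtual "void Run(Thread* self)"
  // as it does elsewhere in the runtime. VisitThreadClosure is a hypothetical example, not a
  // runtime class:
  //
  //   class VisitThreadClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       ...  // Runs at the thread's next suspend point, or by us if it is suspended.
  //     }
  //   };
  //   VisitThreadClosure closure;
  //   size_t count = RunCheckpoint(&closure);  // Threads that have run or will run it.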

  // Run an empty checkpoint on threads. Wait until threads pass the next suspend point or are
  // suspended. This is used to ensure that no thread is in the middle of an in-flight mutator
  // heap access (e.g., a read barrier). Runnable threads will respond by decrementing the empty
  // checkpoint barrier count. This works even when weak ref access is disabled. Only one
  // concurrent use is currently supported.
  void RunEmptyCheckpoint()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  // Used to flip thread roots from from-space refs to to-space refs. Used only by the concurrent
  // moving collectors during a GC, and hence cannot be called from multiple threads concurrently.
  //
  // Briefly suspends all threads to atomically install a checkpoint-like thread_flip_visitor
  // function to be run on each thread. Runs flip_callback while threads are suspended.
  // Each thread_flip_visitor is run either by its thread before it becomes runnable, or by us.
  // We do not return until all thread_flip_visitors have been run.
  void FlipThreadRoots(Closure* thread_flip_visitor,
                       Closure* flip_callback,
                       gc::collector::GarbageCollector* collector,
                       gc::GcPauseListener* pause_listener)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  // Iterates over all the threads.
  void ForEach(void (*callback)(Thread*, void*), void* context)
      REQUIRES(Locks::thread_list_lock_);

  template<typename CallBack>
  void ForEach(CallBack cb) REQUIRES(Locks::thread_list_lock_) {
    ForEach([](Thread* t, void* ctx) REQUIRES(Locks::thread_list_lock_) {
      (*reinterpret_cast<CallBack*>(ctx))(t);
    }, &cb);
  }
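
  // Illustrative use of the lambda overload, as a sketch (the caller must already hold
  // thread_list_lock_, e.g. via a MutexLock on Locks::thread_list_lock_):
  //
  //   MutexLock mu(self, *Locks::thread_list_lock_);
  //   thread_list->ForEach([](Thread* t) {
  //     ...  // Inspect each registered thread.
  //   });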

  // Add/remove current thread from list.
  void Register(Thread* self)
      REQUIRES(Locks::runtime_shutdown_lock_)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);
  void Unregister(Thread* self, bool should_run_callbacks)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  // Wait until there are no Unregister() requests in flight. Only makes sense when we know that
  // no new calls can be made, e.g. because we're the last thread.
  void WaitForUnregisterToComplete(Thread* self) REQUIRES(Locks::thread_list_lock_);

  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitRootsForSuspendedThreads(RootVisitor* visitor)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const REQUIRES(Locks::mutator_lock_);

  void SweepInterpreterCaches(IsMarkedVisitor* visitor) const
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Return a copy of the thread list.
  std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
    return list_;
  }

  size_t Size() REQUIRES(Locks::thread_list_lock_) { return list_.size(); }

  void DumpNativeStacks(std::ostream& os)
      REQUIRES(!Locks::thread_list_lock_);

  Barrier* EmptyCheckpointBarrier() {
    return empty_checkpoint_barrier_.get();
  }

  void WaitForOtherNonDaemonThreadsToExit(bool check_no_birth = true)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::mutator_lock_);

  // Wait for suspend barrier to reach zero. Return false on timeout.
  bool WaitForSuspendBarrier(AtomicInteger* barrier);

 private:
  uint32_t AllocThreadId(Thread* self);
  void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);

  void DumpUnattachedThreads(std::ostream& os, bool dump_native_stack)
      REQUIRES(!Locks::thread_list_lock_);

  void SuspendAllDaemonThreadsForShutdown()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  void ResumeAllInternal(Thread* self)
      REQUIRES(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);

  void SuspendAllInternal(Thread* self, SuspendReason reason = SuspendReason::kInternal)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_,
               !Locks::mutator_lock_);

  void AssertOtherThreadsAreSuspended(Thread* self)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);

  // The actual list of all threads.
  std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);

  // Ongoing suspend-all requests, used to ensure threads added to list_ respect SuspendAll, and
  // to ensure that only one SuspendAll or FlipThreadRoots call is active at a time. The value is
  // always either 0 or 1. thread_suspend_count_lock_ must be held continuously while these two
  // functions modify suspend counts of all other threads and modify suspend_all_count_.
  int suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Number of threads unregistering; ~ThreadList blocks until this hits 0.
  int unregistering_count_ GUARDED_BY(Locks::thread_list_lock_);

  // Thread suspend time histogram. Only modified when all the threads are suspended, so guarding
  // it with the mutator lock ensures that no thread can read it while another thread is modifying
  // it.
  Histogram<uint64_t> suspend_all_histogram_ GUARDED_BY(Locks::mutator_lock_);

  // Whether or not the current thread suspension is long.
  bool long_suspend_;

  // Whether the shutdown function has been called. This is checked in the destructor. It is an
  // error to destroy a ThreadList instance without first calling ShutDown().
  bool shut_down_;

  // Thread suspension timeout in nanoseconds.
  const uint64_t thread_suspend_timeout_ns_;

  std::unique_ptr<Barrier> empty_checkpoint_barrier_;

  friend class Thread;

  friend class Mutex;
  friend class BaseMutex;

  DISALLOW_COPY_AND_ASSIGN(ThreadList);
};

// Helper for suspending all threads and getting exclusive access to the mutator lock.
class ScopedSuspendAll : public ValueObject {
 public:
  explicit ScopedSuspendAll(const char* cause, bool long_suspend = false)
     EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
     REQUIRES(!Locks::thread_list_lock_,
              !Locks::thread_suspend_count_lock_,
              !Locks::mutator_lock_);
  // No REQUIRES(mutator_lock_) since the unlock function already asserts this.
  ~ScopedSuspendAll()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);
};
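
// Typical use, as a sketch: all other threads are suspended for the lifetime of the scope, and
// the exclusive mutator lock is dropped again on destruction.
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__);
//     ...  // Walk stacks, visit roots, etc., with no other thread running.
//   }      // Other threads resume here.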

}  // namespace art

#endif  // ART_RUNTIME_THREAD_LIST_H_