path: root/runtime/jit/jit_code_cache.h
/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
#include "gc_root.h"
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
#include "profile_compilation_info.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ArtMethod;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class OatQuickMethodHeader;
class ProfilingInfo;

namespace jit {

class JitInstrumentationCache;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low value for debug builds to stress code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with an initial code + data capacity of `initial_capacity`,
  // growable up to `max_capacity`. On failure, the error message is returned in the
  // out arg `error_msg`.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

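  // Notify the code cache that the JIT is about to compile `method` (as an on-stack-replacement
  // compilation if `osr` is true). Returns whether the compilation should proceed.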
  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

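  // Notify the code cache that `method` has been redefined, so that state cached for the old
  // definition can be discarded.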
  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the profiling info of `method`
  // to drive optimizations, and therefore ensure the returned profiling info object is
  // not collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

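  // Notify the code cache that the compilation of `method` announced via NotifyCompilationOf
  // has finished.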
  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

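  // Notify the code cache that the compiler is done using the profiling info of `method`
  // obtained via NotifyCompilerUse, so that it may be collected again.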
  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
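
  // A rough, illustrative sketch of the expected compiler-side flow (local variable
  // names below are hypothetical, not part of this API):
  //
  //   if (code_cache->NotifyCompilationOf(method, self, /*osr=*/ false)) {
  //     // ... run the compiler for `method` ...
  //     uint8_t* stack_map_data = nullptr;
  //     uint8_t* method_info_data = nullptr;
  //     uint8_t* roots_data = nullptr;
  //     code_cache->ReserveData(self, stack_map_size, method_info_size, number_of_roots,
  //                             method, &stack_map_data, &method_info_data, &roots_data);
  //     // ... write the stack map, method info and root table into the reserved data ...
  //     code_cache->CommitCode(self, method, stack_map_data, method_info_data, roots_data,
  //                            frame_size_in_bytes, core_spill_mask, fp_spill_mask,
  //                            code, code_size, data_size, /*osr=*/ false, roots,
  //                            has_should_deoptimize_flag, cha_single_implementation_list);
  //     code_cache->DoneCompiling(method, self, /*osr=*/ false);
  //   }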

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data large enough for `stack_map_size` + `method_info_size` bytes,
  // plus space for storing `number_of_roots` roots. Sets the out pointers to null if there
  // is no more room. Returns the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT-compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed in only
  // as a sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

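  // Return the method header of the OSR-compiled code of `method`, or null if there is none.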
  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes `method` from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

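  // Callback for the underlying mspace allocators: grows the code or data space identified
  // by `mspace` by `increment` bytes (dlmalloc-style morecore semantics).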
  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

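  // Return the time, in nanoseconds, at which the code cache was last updated.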
  uint64_t GetLastUpdateTimeNs() const;

  size_t GetCurrentCapacity() REQUIRES(!lock_) {
    MutexLock lock(Thread::Current(), lock_);
    return current_capacity_;
  }

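  // Return the size in bytes of the code cache allocation backing the code at `ptr`.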
  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

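  // Stop using the compiled `code` as the entry point of `method`, for instance when an
  // assumption the code was compiled under (such as a CHA single-implementation) no
  // longer holds.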
  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

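  // Return whether the code cache holds OSR-compiled code for `method`.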
  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

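  // Called by the GC to sweep the root tables of JIT-compiled code, updating moved
  // references and clearing dead ones through `visitor`.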
  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow reading inline caches while it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

  // Notify the code cache that the method pointed to by 'old_method' is being moved to
  // 'new_method' because it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that does not retry if the allocation
  // fails; returns null in that case.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Free the mspace allocations backing `code_ptr`.
  void FreeCode(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

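  // Perform the actual collection work for GarbageCollectCache. When `collect_profiling_info`
  // is true, profiling info that is no longer needed is also reclaimed.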
  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR-compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histograms for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histograms for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histograms for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_