Diffstat (limited to 'runtime/gc/heap.cc')
-rw-r--r--  runtime/gc/heap.cc  130
1 file changed, 109 insertions, 21 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 08aaaefa8f..e0263297c1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -22,6 +22,9 @@
#include <malloc.h> // For mallinfo()
#endif
#include <memory>
+#include <random>
+#include <unistd.h>
+#include <sys/types.h>
#include <vector>
#include "android-base/stringprintf.h"
@@ -316,6 +319,7 @@ Heap::Heap(size_t initial_size,
next_gc_type_(collector::kGcTypePartial),
capacity_(capacity),
growth_limit_(growth_limit),
+ initial_heap_size_(initial_size),
target_footprint_(initial_size),
// Using kPostMonitorLock as a lock at kDefaultMutexLevel is acquired after
// this one.
@@ -2128,6 +2132,27 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
return HomogeneousSpaceCompactResult::kSuccess;
}
+void Heap::SetDefaultConcurrentStartBytes() {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (collector_type_running_ != kCollectorTypeNone) {
+    // If a collector is already running, just let it set concurrent_start_bytes_.
+ return;
+ }
+ SetDefaultConcurrentStartBytesLocked();
+}
+
+void Heap::SetDefaultConcurrentStartBytesLocked() {
+ if (IsGcConcurrent()) {
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+ size_t reserve_bytes = target_footprint / 4;
+ reserve_bytes = std::min(reserve_bytes, kMaxConcurrentRemainingBytes);
+ reserve_bytes = std::max(reserve_bytes, kMinConcurrentRemainingBytes);
+ concurrent_start_bytes_ = UnsignedDifference(target_footprint, reserve_bytes);
+ } else {
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+ }
+}
+
void Heap::ChangeCollector(CollectorType collector_type) {
// TODO: Only do this with all mutators suspended to avoid races.
if (collector_type != collector_type_) {
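
As a rough illustration of the new threshold computation, the standalone sketch below mirrors SetDefaultConcurrentStartBytesLocked(): reserve a quarter of the target footprint, clamp the reserve to [kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes], and request a concurrent GC once allocation reaches footprint minus reserve. The constant values and the saturating-subtraction behavior of UnsignedDifference are assumptions here (both are defined elsewhere in ART's heap sources), so treat the numbers as placeholders.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <limits>

    // Placeholder values; the real constants are defined elsewhere in ART's heap code.
    constexpr size_t kKB = 1024;
    constexpr size_t kMinConcurrentRemainingBytes = 128 * kKB;
    constexpr size_t kMaxConcurrentRemainingBytes = 512 * kKB;

    // Assumed behavior of Heap::UnsignedDifference: subtraction that bottoms out at zero.
    static size_t UnsignedDifference(size_t a, size_t b) {
      return a > b ? a - b : 0;
    }

    // Mirrors SetDefaultConcurrentStartBytesLocked() for a concurrent collector.
    static size_t DefaultConcurrentStartBytes(size_t target_footprint, bool gc_concurrent) {
      if (!gc_concurrent) {
        return std::numeric_limits<size_t>::max();  // Never trigger a concurrent GC.
      }
      size_t reserve_bytes = target_footprint / 4;
      reserve_bytes = std::min(reserve_bytes, kMaxConcurrentRemainingBytes);
      reserve_bytes = std::max(reserve_bytes, kMinConcurrentRemainingBytes);
      return UnsignedDifference(target_footprint, reserve_bytes);
    }

    int main() {
      // 1 MiB footprint: the reserve is 256 KiB, inside the clamp range, so GC starts at 768 KiB.
      std::printf("%zu\n", DefaultConcurrentStartBytes(1024 * kKB, true));
      // 8 MiB footprint: the reserve is capped at kMax, so GC starts at footprint - kMax.
      std::printf("%zu\n", DefaultConcurrentStartBytes(8 * 1024 * kKB, true));
      return 0;
    }

Compared with the inline computation removed from ChangeCollector() in the following hunk, which always reserved kMinConcurrentRemainingBytes, the helper scales the reserve with the footprint up to the kMax cap, so larger heaps begin concurrent collection correspondingly earlier.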
@@ -2174,13 +2199,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
UNREACHABLE();
}
}
- if (IsGcConcurrent()) {
- concurrent_start_bytes_ =
- UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
- kMinConcurrentRemainingBytes);
- } else {
- concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
- }
+ SetDefaultConcurrentStartBytesLocked();
}
}
@@ -3544,6 +3563,11 @@ double Heap::HeapGrowthMultiplier() const {
void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
size_t bytes_allocated_before_gc) {
+ // We're running in the thread that set collector_type_running_ to something other than none,
+ // thus ensuring that there is only one of us running. Thus
+ // collector_type_running_ != kCollectorTypeNone, but that's a little tricky to turn into a
+ // DCHECK.
+
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
const size_t bytes_allocated = GetBytesAllocated();
@@ -3669,8 +3693,7 @@ void Heap::ClearGrowthLimit() {
if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
&& growth_limit_ < capacity_) {
target_footprint_.store(capacity_, std::memory_order_relaxed);
- concurrent_start_bytes_ =
- UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
+ SetDefaultConcurrentStartBytes();
}
growth_limit_ = capacity_;
ScopedObjectAccess soa(Thread::Current());
@@ -4437,32 +4460,97 @@ void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t all
<< PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
}
+// Run a gc if we haven't run one since initial_gc_num. This forces processes to
+// reclaim memory allocated during startup, even if they don't do much
+// allocation post startup. If the process is actively allocating and triggering
+// GCs, or has moved to the background and hence forced a GC, this does nothing.
class Heap::TriggerPostForkCCGcTask : public HeapTask {
public:
- explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
+ explicit TriggerPostForkCCGcTask(uint64_t target_time, uint32_t initial_gc_num) :
+ HeapTask(target_time), initial_gc_num_(initial_gc_num) {}
void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
- // Trigger a GC, if not already done. The first GC after fork, whenever it
- // takes place, will adjust the thresholds to normal levels.
- if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
- heap->RequestConcurrentGC(self, kGcCauseBackground, false, heap->GetCurrentGcNum());
+ if (heap->GetCurrentGcNum() == initial_gc_num_) {
+ if (kLogAllGCs) {
+ LOG(INFO) << "Forcing GC for allocation-inactive process";
+ }
+ heap->RequestConcurrentGC(self, kGcCauseBackground, false, initial_gc_num_);
}
}
+ private:
+ uint32_t initial_gc_num_;
};
+// Reduce target footprint, if no GC has occurred since initial_gc_num.
+// If a GC already occurred, it will have done this for us.
+class Heap::ReduceTargetFootprintTask : public HeapTask {
+ public:
+ explicit ReduceTargetFootprintTask(uint64_t target_time, size_t new_target_sz,
+ uint32_t initial_gc_num) :
+ HeapTask(target_time), new_target_sz_(new_target_sz), initial_gc_num_(initial_gc_num) {}
+ void Run(Thread* self) override {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ MutexLock mu(self, *(heap->gc_complete_lock_));
+ if (heap->GetCurrentGcNum() == initial_gc_num_
+ && heap->collector_type_running_ == kCollectorTypeNone) {
+ size_t target_footprint = heap->target_footprint_.load(std::memory_order_relaxed);
+ if (target_footprint > new_target_sz_) {
+ if (heap->target_footprint_.CompareAndSetStrongRelaxed(target_footprint, new_target_sz_)) {
+ heap->SetDefaultConcurrentStartBytesLocked();
+ }
+ }
+ }
+ }
+ private:
+ size_t new_target_sz_;
+ uint32_t initial_gc_num_;
+};
+
+// Return a pseudo-random integer between 0 and 19999, using the uid as a seed. We want this to
+// be deterministic for a given process, but to vary randomly across processes. Empirically, the
+// uids for processes for which this matters are distinct.
+static uint32_t GetPseudoRandomFromUid() {
+ std::default_random_engine rng(getuid());
+ std::uniform_int_distribution<int> dist(0, 19999);
+ return dist(rng);
+}
+
void Heap::PostForkChildAction(Thread* self) {
+ uint32_t starting_gc_num = GetCurrentGcNum();
+ uint64_t last_adj_time = NanoTime();
+ next_gc_type_ = NonStickyGcType(); // Always start with a full gc.
+
// Temporarily increase target_footprint_ and concurrent_start_bytes_ to
// max values to avoid GC during app launch.
- if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
+ if (!IsLowMemoryMode()) {
// Set target_footprint_ to the largest allowed value.
SetIdealFootprint(growth_limit_);
- // Set concurrent_start_bytes_ to half of the heap size.
- size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
- concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
-
- GetTaskProcessor()->AddTask(
- self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
+ SetDefaultConcurrentStartBytes();
+
+ // Shrink heap after kPostForkMaxHeapDurationMS, to force a memory hog process to GC.
+ // This remains high enough that many processes will continue without a GC.
+ if (initial_heap_size_ < growth_limit_) {
+ size_t first_shrink_size = std::max(growth_limit_ / 4, initial_heap_size_);
+ last_adj_time += MsToNs(kPostForkMaxHeapDurationMS);
+ GetTaskProcessor()->AddTask(
+ self, new ReduceTargetFootprintTask(last_adj_time, first_shrink_size, starting_gc_num));
+ // Shrink to a small value after a substantial time period. This will typically force a
+ // GC if none has occurred yet. Has no effect if there was a GC before this anyway, which
+ // is commonly the case, e.g. because of a process transition.
+ if (initial_heap_size_ < first_shrink_size) {
+ last_adj_time += MsToNs(4 * kPostForkMaxHeapDurationMS);
+ GetTaskProcessor()->AddTask(
+ self,
+ new ReduceTargetFootprintTask(last_adj_time, initial_heap_size_, starting_gc_num));
+ }
+ }
}
+ // Schedule a GC after a substantial period of time. This will become a no-op if another GC is
+ // scheduled in the interim. If not, we want to avoid holding onto start-up garbage.
+ uint64_t post_fork_gc_time = last_adj_time
+ + MsToNs(4 * kPostForkMaxHeapDurationMS + GetPseudoRandomFromUid());
+ GetTaskProcessor()->AddTask(self,
+ new TriggerPostForkCCGcTask(post_fork_gc_time, starting_gc_num));
}
void Heap::VisitReflectiveTargets(ReflectiveValueVisitor *visit) {
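
To see how the PostForkChildAction() scheduling above plays out over time, the sketch below walks through the same arithmetic with made-up sizes. kPostForkMaxHeapDurationMS is defined elsewhere in ART's heap code; the 2000 ms used here and the heap sizes are placeholders only, and the helper names are not ART APIs.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <random>
    #include <unistd.h>

    // Placeholder; the real constant is defined elsewhere in ART's heap code.
    constexpr uint64_t kPostForkMaxHeapDurationMS = 2000;

    // Same idea as GetPseudoRandomFromUid() above: deterministic per uid, spread
    // over [0, 19999] ms so processes do not all trigger the fallback GC at once.
    static uint32_t PseudoRandomFromUid() {
      std::default_random_engine rng(getuid());
      std::uniform_int_distribution<int> dist(0, 19999);
      return dist(rng);
    }

    int main() {
      // Hypothetical stand-ins for initial_heap_size_ and growth_limit_.
      uint64_t initial_heap_size = 8ull << 20;   // 8 MiB
      uint64_t growth_limit = 256ull << 20;      // 256 MiB

      uint64_t t_ms = 0;  // Time since PostForkChildAction() ran, in milliseconds.
      uint64_t first_shrink_size = std::max(growth_limit / 4, initial_heap_size);

      t_ms += kPostForkMaxHeapDurationMS;
      std::printf("t=%llu ms: shrink target_footprint_ to %llu bytes, if no GC has run yet\n",
                  (unsigned long long)t_ms, (unsigned long long)first_shrink_size);

      if (initial_heap_size < first_shrink_size) {
        t_ms += 4 * kPostForkMaxHeapDurationMS;
        std::printf("t=%llu ms: shrink target_footprint_ to %llu bytes, if still no GC\n",
                    (unsigned long long)t_ms, (unsigned long long)initial_heap_size);
      }

      t_ms += 4 * kPostForkMaxHeapDurationMS + PseudoRandomFromUid();
      std::printf("t=%llu ms: force a background GC, if still no GC\n",
                  (unsigned long long)t_ms);
      return 0;
    }

With these placeholder numbers, the footprint drops to growth_limit_/4 about 2 s after fork, to initial_heap_size_ at about 10 s, and a background GC is forced somewhere between roughly 18 s and 38 s, with the per-uid jitter keeping a batch of freshly forked processes from collecting simultaneously. Each scheduled task is skipped if a GC has already run since the fork.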