author | Tom Cherry <tomcherry@google.com> | 2020-10-06 10:22:35 -0700
committer | Tom Cherry <tomcherry@google.com> | 2020-10-07 15:00:49 -0700
commit | c581886eeaa486bca4d5bee6e1c1bb5f8df23745 (patch)
tree | 2acd67f703b160988367d1e986a89d48bc80c989 /logd/LogReaderThread.cpp
parent | 8401907adcefe1079f5e5fea524c4ea06c8324bf (diff)
logd: single std::mutex for locking log buffers and tracking readers
There are only three places where the log buffer lock is not already
held when the reader lock is taken:
1) In LogReader, when a new reader connects
2) In LogReader, when a misbehaving reader disconnects
3) LogReaderThread::ThreadFunction()
Cases 1) and 2) happen rarely enough that there is no impact if they
additionally take a global lock.
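For illustration, here is a minimal, self-contained sketch of why such a rarely executed path can simply take the single global lock. It is not logd's actual LogReader code; OnNewReaderConnected, ReaderThread and reader_threads are hypothetical stand-ins.

```cpp
#include <memory>
#include <mutex>
#include <vector>

// Hypothetical stand-ins for logd's reader bookkeeping.
std::mutex logd_lock;
struct ReaderThread {};
std::vector<std::unique_ptr<ReaderThread>> reader_threads;

// A new reader connecting is rare, so taking the global lock here adds no
// meaningful contention even though the same lock also guards the log buffer.
void OnNewReaderConnected() {
    auto lock = std::lock_guard{logd_lock};
    reader_threads.emplace_back(std::make_unique<ReaderThread>());
}

int main() {
    OnNewReaderConnected();
}
```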
Case 3) is refactored in this CL. Previously, it did the following in a loop:
1) Lock the reader lock, then wait on a condition variable
2) Unlock the reader lock
3) Lock the log buffer lock in LogBuffer::FlushTo()
4) In each iteration of the LogBuffer::FlushTo() loop:
   1) Lock then unlock the reader lock in FilterSecondPass()
   2) Unlock the log buffer lock to send the message, then re-lock it
5) Unlock the log buffer lock when leaving LogBuffer::FlushTo()
If these locks are collapsed into a single lock, this simplifies to:
1) Lock the single lock, then wait on a condition variable
2) In each iteration of the LogBuffer::FlushTo() loop:
   1) Unlock the single lock to send the message, then re-lock it
Collapsing both of these locks into a single lock simplifies the code and
removes the overhead of acquiring the second lock in the majority of use
cases, where the first lock is already held.
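The following is a minimal, self-contained sketch of the resulting loop structure. It is not logd's actual code: pending_logs, wake_reader and ReaderLoop are hypothetical stand-ins for the buffer state, thread_triggered_condition_ and LogReaderThread::ThreadFunction(). It only shows the shape described above: one lock, dropped solely while a message is sent.

```cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex logd_lock;                // the single lock from this change
std::condition_variable wake_reader; // stands in for thread_triggered_condition_
bool release = false;                // set when the reader should exit
int pending_logs = 0;                // stands in for unread entries in the buffer

// Simplified shape of the reader loop after the change: the same lock guards
// the buffer and the reader state, and it is dropped only while a message is
// written to the (potentially slow) client socket.
void ReaderLoop() {
    std::unique_lock lock{logd_lock};
    for (;;) {
        while (pending_logs > 0) {   // flush everything currently buffered
            --pending_logs;
            lock.unlock();           // unlock only to send the message...
            std::printf("send one log entry\n");
            lock.lock();             // ...then re-lock and continue the flush
        }
        if (release) break;
        wake_reader.wait(lock);      // wait for more logs under the same lock
    }
}

int main() {
    std::thread reader(ReaderLoop);
    {
        std::lock_guard guard{logd_lock};
        pending_logs = 3;            // a writer queues some logs...
        release = true;              // ...and asks the reader to exit afterwards
    }
    wake_reader.notify_one();
    reader.join();
}
```

The point of the sketch is that the condition-variable wait and the flush now happen under the same lock, so no lock juggling is needed between them.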
Secondly, this lock will be a plain std::mutex instead of an RwLock. RwLocks
are appropriate when there is a substantial imbalance between readers and
writers and contention is high; neither is true for logd.
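As a rough illustration of that trade-off (a sketch, not logd code), compare a plain mutex with the standard library's reader/writer lock; the shared side only pays off when many readers hold it concurrently:

```cpp
#include <mutex>
#include <shared_mutex>

// The choice made by this change: one plain mutex for everything.
std::mutex logd_lock;

// The rejected alternative: a reader/writer lock. lock_shared() lets several
// readers in at once, but each acquire/release costs more, and logd has only a
// few reader threads that mostly sleep on a condition variable anyway.
std::shared_mutex rw_lock;

int value = 0;  // stands in for shared logd state

int ReadWithMutex() {
    std::lock_guard lock{logd_lock};
    return value;
}

int ReadWithRwLock() {
    std::shared_lock lock{rw_lock};  // shared (reader) side
    return value;
}

void WriteWithRwLock(int v) {
    std::unique_lock lock{rw_lock};  // exclusive (writer) side
    value = v;
}

int main() {
    WriteWithRwLock(42);
    return ReadWithMutex() == ReadWithRwLock() ? 0 : 1;
}
```

A further practical point in favour of std::mutex here: std::condition_variable, which the reader thread blocks on, only waits on a std::unique_lock<std::mutex>.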
Bug: 169736426
Test: logging unit tests
Change-Id: Ia511506f2d0935a5321c1b2f65569066f91ecb06
Diffstat (limited to 'logd/LogReaderThread.cpp')
-rw-r--r-- | logd/LogReaderThread.cpp | 40
1 file changed, 12 insertions, 28 deletions
diff --git a/logd/LogReaderThread.cpp b/logd/LogReaderThread.cpp
index 6ac9741c30..cf769bafb5 100644
--- a/logd/LogReaderThread.cpp
+++ b/logd/LogReaderThread.cpp
@@ -24,6 +24,7 @@

 #include "LogBuffer.h"
 #include "LogReaderList.h"
+#include "SerializedFlushToState.h"

 LogReaderThread::LogReaderThread(LogBuffer* log_buffer, LogReaderList* reader_list,
                                  std::unique_ptr<LogWriter> writer, bool non_block,
@@ -40,7 +41,7 @@ LogReaderThread::LogReaderThread(LogBuffer* log_buffer, LogReaderList* reader_li
       start_time_(start_time),
       deadline_(deadline),
       non_block_(non_block) {
-    cleanSkip_Locked();
+    CleanSkip();
     flush_to_state_ = log_buffer_->CreateFlushToState(start, log_mask);
     auto thread = std::thread{&LogReaderThread::ThreadFunction, this};
     thread.detach();
@@ -49,7 +50,8 @@ LogReaderThread::LogReaderThread(LogBuffer* log_buffer, LogReaderList* reader_li
 void LogReaderThread::ThreadFunction() {
     prctl(PR_SET_NAME, "logd.reader.per");

-    auto lock = std::unique_lock{reader_list_->reader_threads_lock()};
+    auto lock = std::unique_lock{logd_lock};
+    auto lock_assertion = android::base::ScopedLockAssertion{logd_lock};

     while (!release_) {
         if (deadline_.time_since_epoch().count() != 0) {
@@ -62,23 +64,19 @@ void LogReaderThread::ThreadFunction() {
             }
         }

-        lock.unlock();
-
         if (tail_) {
             auto first_pass_state = log_buffer_->CreateFlushToState(flush_to_state_->start(),
                                                                     flush_to_state_->log_mask());
-            log_buffer_->FlushTo(
-                    writer_.get(), *first_pass_state,
-                    [this](log_id_t log_id, pid_t pid, uint64_t sequence, log_time realtime) {
-                        return FilterFirstPass(log_id, pid, sequence, realtime);
-                    });
-            log_buffer_->DeleteFlushToState(std::move(first_pass_state));
+            log_buffer_->FlushTo(writer_.get(), *first_pass_state,
+                                 [this](log_id_t log_id, pid_t pid, uint64_t sequence,
+                                        log_time realtime) REQUIRES(logd_lock) {
+                                     return FilterFirstPass(log_id, pid, sequence, realtime);
+                                 });
         }
         bool flush_success = log_buffer_->FlushTo(
                 writer_.get(), *flush_to_state_,
-                [this](log_id_t log_id, pid_t pid, uint64_t sequence, log_time realtime) {
-                    return FilterSecondPass(log_id, pid, sequence, realtime);
-                });
+                [this](log_id_t log_id, pid_t pid, uint64_t sequence, log_time realtime) REQUIRES(
+                        logd_lock) { return FilterSecondPass(log_id, pid, sequence, realtime); });

         // We only ignore entries before the original start time for the first flushTo(), if we
         // get entries after this first flush before the original start time, then the client
@@ -89,8 +87,6 @@ void LogReaderThread::ThreadFunction() {
         start_time_.tv_sec = 0;
         start_time_.tv_nsec = 0;

-        lock.lock();
-
         if (!flush_success) {
             break;
         }
@@ -99,17 +95,13 @@ void LogReaderThread::ThreadFunction() {
             break;
         }

-        cleanSkip_Locked();
+        CleanSkip();

         if (deadline_.time_since_epoch().count() == 0) {
             thread_triggered_condition_.wait(lock);
         }
     }
-    lock.unlock();
-
     log_buffer_->DeleteFlushToState(std::move(flush_to_state_));
-    lock.lock();
-
     writer_->Release();

     auto& log_reader_threads = reader_list_->reader_threads();
@@ -123,8 +115,6 @@ void LogReaderThread::ThreadFunction() {

 // A first pass to count the number of elements
 FilterResult LogReaderThread::FilterFirstPass(log_id_t, pid_t pid, uint64_t, log_time realtime) {
-    auto lock = std::lock_guard{reader_list_->reader_threads_lock()};
-
     if ((!pid_ || pid_ == pid) && (start_time_ == log_time::EPOCH || start_time_ <= realtime)) {
         ++count_;
     }
@@ -135,8 +125,6 @@ FilterResult LogReaderThread::FilterFirstPass(log_id_t, pid_t pid, uint64_t, log
 // A second pass to send the selected elements
 FilterResult LogReaderThread::FilterSecondPass(log_id_t log_id, pid_t pid, uint64_t,
                                                log_time realtime) {
-    auto lock = std::lock_guard{reader_list_->reader_threads_lock()};
-
     if (skip_ahead_[log_id]) {
         skip_ahead_[log_id]--;
         return FilterResult::kSkip;
@@ -179,7 +167,3 @@ ok:
     }
     return FilterResult::kSkip;
 }
-
-void LogReaderThread::cleanSkip_Locked(void) {
-    memset(skip_ahead_, 0, sizeof(skip_ahead_));
-}
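The diff also adds REQUIRES(logd_lock) annotations and an android::base::ScopedLockAssertion, which feed Clang's -Wthread-safety analysis. The sketch below re-creates a minimal version of that machinery locally to show what the annotations express; the real macros and ScopedLockAssertion come from AOSP's libbase thread-annotation header, and the Mutex class, logs_sent and CountOneLog below are illustrative stand-ins, not logd code.

```cpp
#include <mutex>

// Local stand-ins for the thread-safety annotation macros.
#if defined(__clang__)
#define THREAD_ANNOTATION(x) __attribute__((x))
#else
#define THREAD_ANNOTATION(x)
#endif
#define CAPABILITY(x) THREAD_ANNOTATION(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define REQUIRES(x) THREAD_ANNOTATION(requires_capability(x))
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION(assert_capability(x))

// A mutex type that clang's -Wthread-safety analysis can track.
class CAPABILITY("mutex") Mutex {
  public:
    void lock() ACQUIRE() { m_.lock(); }
    void unlock() RELEASE() { m_.unlock(); }

  private:
    std::mutex m_;
};

// Tells the analysis "this lock is held here", in places it cannot prove it.
class SCOPED_CAPABILITY ScopedLockAssertion {
  public:
    explicit ScopedLockAssertion(Mutex& mutex) ASSERT_CAPABILITY(mutex) {}
    ~ScopedLockAssertion() RELEASE() {}
};

Mutex logd_lock;
int logs_sent GUARDED_BY(logd_lock) = 0;

// Like the FlushTo() filter callbacks in the diff: annotated REQUIRES so the
// compiler checks that every caller holds logd_lock when invoking it.
void CountOneLog() REQUIRES(logd_lock) {
    ++logs_sent;
}

void ReaderBody() {
    std::unique_lock lock{logd_lock};     // takes the lock at runtime
    ScopedLockAssertion held{logd_lock};  // tells the analysis it is held
    CountOneLog();                        // OK: REQUIRES(logd_lock) satisfied
}

int main() {
    ReaderBody();
}
```

The assertion is needed because std::unique_lock acquires the mutex inside library code, so the analysis cannot otherwise see that logd_lock is held when the REQUIRES-annotated callback runs.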