author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-06-29 16:28:54 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-06-29 16:28:54 +0000
commit     04816a52949c2d4f69a5a3abd5843316acbde836 (patch)
tree       01ec931931503ed80f181b2d85efb5df0079e4ba
parent     36d3d72cc3aeb7132164ff9a5b9e8255576c9bca (diff)
parent     f79cb91b4218a68642c94a01defc0594e742b3f4 (diff)

Snap for 7441201 from f79cb91b4218a68642c94a01defc0594e742b3f4 to s-keystone-qcom-release

Change-Id: I8fe51a7a4c0ba91d864383e6501cf0e8b8ab71d5
-rw-r--r--  aosp/update_attempter_android.cc                                                       |  17
-rw-r--r--  aosp/update_attempter_android_unittest.cc                                              |  15
-rw-r--r--  common/constants.cc                                                                    |   1
-rw-r--r--  common/constants.h                                                                     |   1
-rw-r--r--  common/scoped_task_id.h                                                                | 123
-rw-r--r--  payload_consumer/file_descriptor.cc                                                    |   4
-rw-r--r--  payload_consumer/filesystem_verifier_action.cc                                         | 290
-rw-r--r--  payload_consumer/filesystem_verifier_action.h                                          |  32
-rw-r--r--  payload_consumer/filesystem_verifier_action_unittest.cc                                | 342
-rw-r--r--  stable/Android.bp                                                                      |   1
-rw-r--r--  stable/aidl_api/libupdate_engine_stable/1/.hash                                        |   1
-rw-r--r--  stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl         |  39
-rw-r--r--  stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl |  38
13 files changed, 226 insertions(+), 678 deletions(-)
diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc
index 4636c430..ba61f255 100644
--- a/aosp/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -364,12 +364,6 @@ bool UpdateAttempterAndroid::ResetStatus(brillo::ErrorPtr* error) {
LOG(INFO) << "Attempting to reset state from "
<< UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
- if (apex_handler_android_ != nullptr) {
- LOG(INFO) << "Cleaning up reserved space for compressed APEX (if any)";
- std::vector<ApexInfo> apex_infos_blank;
- apex_handler_android_->AllocateSpace(apex_infos_blank);
- }
-
switch (status_) {
case UpdateStatus::IDLE: {
if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
@@ -890,25 +884,20 @@ void UpdateAttempterAndroid::UpdatePrefsAndReportUpdateMetricsOnReboot() {
string current_version =
android::base::GetProperty("ro.build.version.incremental", "");
TEST_AND_RETURN(!current_version.empty());
- const auto current_slot = boot_control_->GetCurrentSlot();
// If there's no record of previous version (e.g. due to a data wipe), we
// save the info of current boot and skip the metrics report.
if (!prefs_->Exists(kPrefsPreviousVersion)) {
prefs_->SetString(kPrefsBootId, current_boot_id);
prefs_->SetString(kPrefsPreviousVersion, current_version);
- prefs_->SetInt64(std::string{kPrefsPreviousSlot},
- boot_control_->GetCurrentSlot());
ClearMetricsPrefs();
return;
}
- int64_t previous_slot = -1;
- prefs_->GetInt64(kPrefsPreviousSlot, &previous_slot);
string previous_version;
- // update_engine restarted under the same build and same slot.
+ // update_engine restarted under the same build.
// TODO(xunchang) identify and report rollback by checking UpdateMarker.
if (prefs_->GetString(kPrefsPreviousVersion, &previous_version) &&
- previous_version == current_version && previous_slot == current_slot) {
+ previous_version == current_version) {
string last_boot_id;
bool is_reboot = prefs_->Exists(kPrefsBootId) &&
(prefs_->GetString(kPrefsBootId, &last_boot_id) &&
@@ -928,8 +917,6 @@ void UpdateAttempterAndroid::UpdatePrefsAndReportUpdateMetricsOnReboot() {
// TODO(xunchang) check the build version is larger than the previous one.
prefs_->SetString(kPrefsBootId, current_boot_id);
prefs_->SetString(kPrefsPreviousVersion, current_version);
- prefs_->SetInt64(std::string{kPrefsPreviousSlot},
- boot_control_->GetCurrentSlot());
bool previous_attempt_exists = prefs_->Exists(kPrefsPayloadAttemptNumber);
// |kPrefsPayloadAttemptNumber| should be cleared upon successful update.
diff --git a/aosp/update_attempter_android_unittest.cc b/aosp/update_attempter_android_unittest.cc
index f73df168..f799df3e 100644
--- a/aosp/update_attempter_android_unittest.cc
+++ b/aosp/update_attempter_android_unittest.cc
@@ -24,7 +24,6 @@
#include <base/time/time.h>
#include <gtest/gtest.h>
-#include "common/constants.h"
#include "update_engine/aosp/daemon_state_android.h"
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_clock.h"
@@ -82,8 +81,6 @@ TEST_F(UpdateAttempterAndroidTest, UpdatePrefsSameBuildVersionOnInit) {
prefs_.SetString(kPrefsPreviousVersion, build_version);
prefs_.SetString(kPrefsBootId, "oldboot");
prefs_.SetInt64(kPrefsNumReboots, 1);
- prefs_.SetInt64(kPrefsPreviousSlot, 1);
- boot_control_.SetCurrentSlot(1);
EXPECT_CALL(*metrics_reporter_, ReportTimeToReboot(_)).Times(0);
update_attempter_android_.Init();
@@ -91,15 +88,15 @@ TEST_F(UpdateAttempterAndroidTest, UpdatePrefsSameBuildVersionOnInit) {
// Check that the boot_id and reboot_count are updated.
std::string boot_id;
utils::GetBootId(&boot_id);
- ASSERT_TRUE(prefs_.Exists(kPrefsBootId));
+ EXPECT_TRUE(prefs_.Exists(kPrefsBootId));
std::string prefs_boot_id;
- ASSERT_TRUE(prefs_.GetString(kPrefsBootId, &prefs_boot_id));
- ASSERT_EQ(boot_id, prefs_boot_id);
+ EXPECT_TRUE(prefs_.GetString(kPrefsBootId, &prefs_boot_id));
+ EXPECT_EQ(boot_id, prefs_boot_id);
- ASSERT_TRUE(prefs_.Exists(kPrefsNumReboots));
+ EXPECT_TRUE(prefs_.Exists(kPrefsNumReboots));
int64_t reboot_count;
- ASSERT_TRUE(prefs_.GetInt64(kPrefsNumReboots, &reboot_count));
- ASSERT_EQ(2, reboot_count);
+ EXPECT_TRUE(prefs_.GetInt64(kPrefsNumReboots, &reboot_count));
+ EXPECT_EQ(2, reboot_count);
}
TEST_F(UpdateAttempterAndroidTest, UpdatePrefsBuildVersionChangeOnInit) {
diff --git a/common/constants.cc b/common/constants.cc
index 0677e663..a9cf238d 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -112,7 +112,6 @@ const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period";
const char kPrefsWallClockStagingWaitPeriod[] =
"wall-clock-staging-wait-period";
const char kPrefsManifestBytes[] = "manifest-bytes";
-const char kPrefsPreviousSlot[] = "previous-slot";
// These four fields are generated by scripts/brillo_update_payload.
const char kPayloadPropertyFileSize[] = "FILE_SIZE";
diff --git a/common/constants.h b/common/constants.h
index 68f720db..64447cea 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -76,7 +76,6 @@ extern const char kPrefsPingLastRollcall[];
extern const char kPrefsLastFp[];
extern const char kPrefsPostInstallSucceeded[];
extern const char kPrefsPreviousVersion[];
-extern const char kPrefsPreviousSlot[];
extern const char kPrefsResumedUpdateFailures[];
extern const char kPrefsRollbackHappened[];
extern const char kPrefsRollbackVersion[];
diff --git a/common/scoped_task_id.h b/common/scoped_task_id.h
deleted file mode 100644
index 91a29860..00000000
--- a/common/scoped_task_id.h
+++ /dev/null
@@ -1,123 +0,0 @@
-//
-// Copyright (C) 2021 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_SCOPED_TASK_ID_H_
-#define UPDATE_ENGINE_SCOPED_TASK_ID_H_
-
-#include <type_traits>
-#include <utility>
-
-#include <base/bind.h>
-#include <brillo/message_loops/message_loop.h>
-
-namespace chromeos_update_engine {
-
-// This class provides unique_ptr like semantic for |MessageLoop::TaskId|, when
-// instance of this class goes out of scope, underlying task will be cancelled.
-class ScopedTaskId {
- using MessageLoop = brillo::MessageLoop;
-
- public:
- // Move only type similar to unique_ptr.
- ScopedTaskId(const ScopedTaskId&) = delete;
- ScopedTaskId& operator=(const ScopedTaskId&) = delete;
-
- constexpr ScopedTaskId() = default;
-
- constexpr ScopedTaskId(ScopedTaskId&& other) noexcept {
- *this = std::move(other);
- }
-
- constexpr ScopedTaskId& operator=(ScopedTaskId&& other) noexcept {
- std::swap(task_id_, other.task_id_);
- return *this;
- }
-
- // Post a callback on current message loop, return true if succeeded, false if
- // the previous callback hasn't run yet, or scheduling failed at MessageLoop
- // side.
- [[nodiscard]] bool PostTask(const base::Location& from_here,
- base::OnceClosure&& callback,
- base::TimeDelta delay = {}) noexcept {
- return PostTask<decltype(callback)>(from_here, std::move(callback), delay);
- }
- [[nodiscard]] bool PostTask(const base::Location& from_here,
- std::function<void()>&& callback,
- base::TimeDelta delay = {}) noexcept {
- return PostTask<decltype(callback)>(from_here, std::move(callback), delay);
- }
-
- ~ScopedTaskId() noexcept { Cancel(); }
-
- // Cancel the underlying managed task, true if cancel successful. False if no
- // task scheduled or task cancellation failed
- bool Cancel() noexcept {
- if (task_id_ != MessageLoop::kTaskIdNull) {
- if (MessageLoop::current()->CancelTask(task_id_)) {
- LOG(INFO) << "Cancelled task id " << task_id_;
- task_id_ = MessageLoop::kTaskIdNull;
- return true;
- }
- }
- return false;
- }
-
- [[nodiscard]] constexpr bool IsScheduled() const noexcept {
- return task_id_ != MessageLoop::kTaskIdNull;
- }
-
- [[nodiscard]] constexpr bool operator==(const ScopedTaskId& other) const
- noexcept {
- return other.task_id_ == task_id_;
- }
-
- [[nodiscard]] constexpr bool operator<(const ScopedTaskId& other) const
- noexcept {
- return task_id_ < other.task_id_;
- }
-
- private:
- template <typename Callable>
- [[nodiscard]] bool PostTask(const base::Location& from_here,
- Callable&& callback,
- base::TimeDelta delay) noexcept {
- if (task_id_ != MessageLoop::kTaskIdNull) {
- LOG(ERROR) << "Scheduling another task but task id " << task_id_
- << " isn't executed yet! This can cause the old task to leak.";
- return false;
- }
- task_id_ = MessageLoop::current()->PostDelayedTask(
- from_here,
- base::BindOnce(&ScopedTaskId::ExecuteTask<decltype(callback)>,
- base::Unretained(this),
- std::move(callback)),
- delay);
- return task_id_ != MessageLoop::kTaskIdNull;
- }
- template <typename Callable>
- void ExecuteTask(Callable&& callback) {
- task_id_ = MessageLoop::kTaskIdNull;
- if constexpr (std::is_same_v<Callable&&, base::OnceClosure&&>) {
- std::move(callback).Run();
- } else {
- std::move(callback)();
- }
- }
- MessageLoop::TaskId task_id_{MessageLoop::kTaskIdNull};
-};
-} // namespace chromeos_update_engine
-
-#endif
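The ScopedTaskId deleted above gave |brillo::MessageLoop::TaskId| unique_ptr-like ownership: whatever callback is still pending gets cancelled when the owner goes out of scope. A minimal usage sketch of that pattern (hypothetical Worker class, not part of this change; it assumes the header removed above and the libchrome/libbrillo APIs it includes):

    #include <base/bind.h>
    #include <base/location.h>
    #include <base/logging.h>
    #include <brillo/message_loops/message_loop.h>

    #include "update_engine/common/scoped_task_id.h"

    namespace chromeos_update_engine {

    class Worker {
     public:
      void Start() {
        // PostTask() refuses to schedule if a previous callback is still pending.
        if (!task_.PostTask(FROM_HERE,
                            base::BindOnce(&Worker::DoStep,
                                           base::Unretained(this)))) {
          LOG(ERROR) << "A step is already scheduled";
        }
      }

     private:
      void DoStep() { /* one unit of work */ }

      // When |task_| is destroyed together with Worker, a still-pending
      // DoStep() is cancelled, so the callback can never run on a dead object.
      ScopedTaskId task_;
    };

    }  // namespace chromeos_update_engine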
diff --git a/payload_consumer/file_descriptor.cc b/payload_consumer/file_descriptor.cc
index da76327c..7c69c1b4 100644
--- a/payload_consumer/file_descriptor.cc
+++ b/payload_consumer/file_descriptor.cc
@@ -139,9 +139,7 @@ bool EintrSafeFileDescriptor::Flush() {
}
bool EintrSafeFileDescriptor::Close() {
- if (fd_ < 0) {
- return false;
- }
+ CHECK_GE(fd_, 0);
// https://stackoverflow.com/questions/705454/does-linux-guarantee-the-contents-of-a-file-is-flushed-to-disc-after-close
// |close()| doesn't imply |fsync()|, we need to do it manually.
fsync(fd_);
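The comment kept in Close() above is the point of this hunk: close() does not imply that the data reached stable storage, so it is flushed explicitly first. A standalone POSIX illustration of that pattern (hypothetical helper, not from this tree):

    #include <fcntl.h>
    #include <unistd.h>

    // Write a buffer and make it durable: fsync() before close(), because
    // close() alone would not flush the page cache to disk.
    bool WriteFileDurably(const char* path, const void* data, size_t size) {
      int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
      if (fd < 0)
        return false;
      bool ok = write(fd, data, size) == static_cast<ssize_t>(size);
      ok = fsync(fd) == 0 && ok;  // Force the data out to stable storage.
      ok = close(fd) == 0 && ok;
      return ok;
    }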
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 22c8e0bd..b14cbc8d 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -35,7 +35,6 @@
#include <brillo/secure_blob.h>
#include <brillo/streams/file_stream.h>
-#include "common/error_code.h"
#include "payload_generator/delta_diff_generator.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/file_descriptor.h"
@@ -78,7 +77,6 @@ namespace chromeos_update_engine {
namespace {
const off_t kReadFileBufferSize = 128 * 1024;
-constexpr float kVerityProgressPercent = 0.6;
} // namespace
void FilesystemVerifierAction::PerformAction() {
@@ -104,6 +102,7 @@ void FilesystemVerifierAction::PerformAction() {
}
void FilesystemVerifierAction::TerminateProcessing() {
+ brillo::MessageLoop::current()->CancelTask(pending_task_id_);
cancelled_ = true;
Cleanup(ErrorCode::kSuccess); // error code is ignored if canceled_ is true.
}
@@ -135,29 +134,11 @@ void FilesystemVerifierAction::UpdateProgress(double progress) {
}
}
-void FilesystemVerifierAction::UpdatePartitionProgress(double progress) {
- // We don't consider sizes of each partition. Every partition
- // has the same length on progress bar.
- // TODO(b/186087589): Take sizes of each partition into account.
- UpdateProgress((progress + partition_index_) /
- install_plan_.partitions.size());
-}
-
-bool FilesystemVerifierAction::InitializeFdVABC(bool should_write_verity) {
+bool FilesystemVerifierAction::InitializeFdVABC() {
const InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
- if (!should_write_verity) {
- // In VABC, we cannot map/unmap partitions w/o first closing ALL fds first.
- // Since this function might be called inside a ScheduledTask, the closure
- // might have a copy of partition_fd_ when executing this function. Which
- // means even if we do |partition_fd_.reset()| here, there's a chance that
- // underlying fd isn't closed until we return. This is unacceptable, we need
- // to close |partition_fd| right away.
- if (partition_fd_) {
- partition_fd_->Close();
- partition_fd_.reset();
- }
+ if (!ShouldWriteVerity()) {
// In VABC, if we are not writing verity, just map all partitions,
// and read using regular fd on |postinstall_mount_device| .
// All read will go through snapuserd, which provides a consistent
@@ -171,6 +152,8 @@ bool FilesystemVerifierAction::InitializeFdVABC(bool should_write_verity) {
dynamic_control_->MapAllPartitions();
return InitializeFd(partition.readonly_target_path);
}
+
+  // FilesystemVerifierAction needs the read_fd_.
partition_fd_ =
dynamic_control_->OpenCowFd(partition.name, partition.source_path, true);
if (!partition_fd_) {
@@ -197,112 +180,6 @@ bool FilesystemVerifierAction::InitializeFd(const std::string& part_path) {
return true;
}
-void FilesystemVerifierAction::WriteVerityAndHashPartition(
- FileDescriptorPtr fd,
- const off64_t start_offset,
- const off64_t end_offset,
- void* buffer,
- const size_t buffer_size) {
- if (start_offset >= end_offset) {
- LOG_IF(WARNING, start_offset > end_offset)
- << "start_offset is greater than end_offset : " << start_offset << " > "
- << end_offset;
- if (!verity_writer_->Finalize(fd, fd)) {
- LOG(ERROR) << "Failed to write verity data";
- Cleanup(ErrorCode::kVerityCalculationError);
- return;
- }
- if (dynamic_control_->UpdateUsesSnapshotCompression()) {
- // Spin up snapuserd to read fs.
- if (!InitializeFdVABC(false)) {
- LOG(ERROR) << "Failed to map all partitions";
- Cleanup(ErrorCode::kFilesystemVerifierError);
- return;
- }
- }
- HashPartition(partition_fd_, 0, partition_size_, buffer, buffer_size);
- return;
- }
- const auto cur_offset = fd->Seek(start_offset, SEEK_SET);
- if (cur_offset != start_offset) {
- PLOG(ERROR) << "Failed to seek to offset: " << start_offset;
- Cleanup(ErrorCode::kVerityCalculationError);
- return;
- }
- const auto read_size =
- std::min<size_t>(buffer_size, end_offset - start_offset);
- const auto bytes_read = fd->Read(buffer, read_size);
- if (bytes_read < 0 || static_cast<size_t>(bytes_read) != read_size) {
- PLOG(ERROR) << "Failed to read offset " << start_offset << " expected "
- << read_size << " bytes, actual: " << bytes_read;
- Cleanup(ErrorCode::kVerityCalculationError);
- return;
- }
- if (!verity_writer_->Update(
- start_offset, static_cast<const uint8_t*>(buffer), read_size)) {
- LOG(ERROR) << "VerityWriter::Update() failed";
- Cleanup(ErrorCode::kVerityCalculationError);
- return;
- }
- UpdatePartitionProgress((start_offset + bytes_read) * 1.0f / partition_size_ *
- kVerityProgressPercent);
- CHECK(pending_task_id_.PostTask(
- FROM_HERE,
- base::BindOnce(&FilesystemVerifierAction::WriteVerityAndHashPartition,
- base::Unretained(this),
- fd,
- start_offset + bytes_read,
- end_offset,
- buffer,
- buffer_size)));
-}
-
-void FilesystemVerifierAction::HashPartition(FileDescriptorPtr fd,
- const off64_t start_offset,
- const off64_t end_offset,
- void* buffer,
- const size_t buffer_size) {
- if (start_offset >= end_offset) {
- LOG_IF(WARNING, start_offset > end_offset)
- << "start_offset is greater than end_offset : " << start_offset << " > "
- << end_offset;
- FinishPartitionHashing();
- return;
- }
- const auto cur_offset = fd->Seek(start_offset, SEEK_SET);
- if (cur_offset != start_offset) {
- PLOG(ERROR) << "Failed to seek to offset: " << start_offset;
- Cleanup(ErrorCode::kFilesystemVerifierError);
- return;
- }
- const auto read_size =
- std::min<size_t>(buffer_size, end_offset - start_offset);
- const auto bytes_read = fd->Read(buffer, read_size);
- if (bytes_read < 0 || static_cast<size_t>(bytes_read) != read_size) {
- PLOG(ERROR) << "Failed to read offset " << start_offset << " expected "
- << read_size << " bytes, actual: " << bytes_read;
- Cleanup(ErrorCode::kFilesystemVerifierError);
- return;
- }
- if (!hasher_->Update(buffer, read_size)) {
- LOG(ERROR) << "Hasher updated failed on offset" << start_offset;
- Cleanup(ErrorCode::kFilesystemVerifierError);
- return;
- }
- const auto progress = (start_offset + bytes_read) * 1.0f / partition_size_;
- UpdatePartitionProgress(progress * (1 - kVerityProgressPercent) +
- kVerityProgressPercent);
- CHECK(pending_task_id_.PostTask(
- FROM_HERE,
- base::BindOnce(&FilesystemVerifierAction::HashPartition,
- base::Unretained(this),
- fd,
- start_offset + bytes_read,
- end_offset,
- buffer,
- buffer_size)));
-}
-
void FilesystemVerifierAction::StartPartitionHashing() {
if (partition_index_ == install_plan_.partitions.size()) {
if (!install_plan_.untouched_dynamic_partitions.empty()) {
@@ -324,14 +201,26 @@ void FilesystemVerifierAction::StartPartitionHashing() {
}
const InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
- const auto& part_path = GetPartitionPath();
- partition_size_ = GetPartitionSize();
+ string part_path;
+ switch (verifier_step_) {
+ case VerifierStep::kVerifySourceHash:
+ part_path = partition.source_path;
+ partition_size_ = partition.source_size;
+ break;
+ case VerifierStep::kVerifyTargetHash:
+ part_path = partition.target_path;
+ partition_size_ = partition.target_size;
+ break;
+ }
LOG(INFO) << "Hashing partition " << partition_index_ << " ("
<< partition.name << ") on device " << part_path;
auto success = false;
- if (IsVABC(partition)) {
- success = InitializeFdVABC(ShouldWriteVerity());
+ if (dynamic_control_->UpdateUsesSnapshotCompression() &&
+ verifier_step_ == VerifierStep::kVerifyTargetHash &&
+ dynamic_control_->IsDynamicPartition(partition.name,
+ install_plan_.target_slot)) {
+ success = InitializeFdVABC();
} else {
if (part_path.empty()) {
if (partition_size_ == 0) {
@@ -366,61 +255,125 @@ void FilesystemVerifierAction::StartPartitionHashing() {
filesystem_data_end_ = partition.fec_offset;
}
if (ShouldWriteVerity()) {
- LOG(INFO) << "Verity writes enabled on partition " << partition.name;
if (!verity_writer_->Init(partition)) {
LOG(INFO) << "Verity writes enabled on partition " << partition.name;
Cleanup(ErrorCode::kVerityCalculationError);
return;
}
- WriteVerityAndHashPartition(
- partition_fd_, 0, filesystem_data_end_, buffer_.data(), buffer_.size());
} else {
LOG(INFO) << "Verity writes disabled on partition " << partition.name;
- HashPartition(
- partition_fd_, 0, partition_size_, buffer_.data(), buffer_.size());
}
-}
-bool FilesystemVerifierAction::IsVABC(
- const InstallPlan::Partition& partition) const {
- return dynamic_control_->UpdateUsesSnapshotCompression() &&
- verifier_step_ == VerifierStep::kVerifyTargetHash &&
- dynamic_control_->IsDynamicPartition(partition.name,
- install_plan_.target_slot);
+ // Start the first read.
+ ScheduleFileSystemRead();
}
-const std::string& FilesystemVerifierAction::GetPartitionPath() const {
+bool FilesystemVerifierAction::ShouldWriteVerity() {
const InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
- switch (verifier_step_) {
- case VerifierStep::kVerifySourceHash:
- return partition.source_path;
- case VerifierStep::kVerifyTargetHash:
- if (IsVABC(partition)) {
- return partition.readonly_target_path;
- } else {
- return partition.target_path;
- }
+ return verifier_step_ == VerifierStep::kVerifyTargetHash &&
+ install_plan_.write_verity &&
+ (partition.hash_tree_size > 0 || partition.fec_size > 0);
+}
+
+void FilesystemVerifierAction::ReadVerityAndFooter() {
+ if (ShouldWriteVerity()) {
+ if (!verity_writer_->Finalize(partition_fd_, partition_fd_)) {
+ LOG(ERROR) << "Failed to write hashtree/FEC data.";
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
}
+ // Since we handed our |read_fd_| to verity_writer_ during |Finalize()|
+ // call, fd's position could have been changed. Re-seek.
+ partition_fd_->Seek(filesystem_data_end_, SEEK_SET);
+ auto bytes_to_read = partition_size_ - filesystem_data_end_;
+ while (bytes_to_read > 0) {
+ const auto read_size = std::min<size_t>(buffer_.size(), bytes_to_read);
+ auto bytes_read = partition_fd_->Read(buffer_.data(), read_size);
+ if (bytes_read <= 0) {
+ PLOG(ERROR) << "Failed to read hash tree " << bytes_read;
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+ if (!hasher_->Update(buffer_.data(), bytes_read)) {
+ LOG(ERROR) << "Unable to update the hash.";
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+ bytes_to_read -= bytes_read;
+ }
+ FinishPartitionHashing();
}
-size_t FilesystemVerifierAction::GetPartitionSize() const {
- const InstallPlan::Partition& partition =
- install_plan_.partitions[partition_index_];
- switch (verifier_step_) {
- case VerifierStep::kVerifySourceHash:
- return partition.source_size;
- case VerifierStep::kVerifyTargetHash:
- return partition.target_size;
+void FilesystemVerifierAction::ScheduleFileSystemRead() {
+ // We can only start reading anything past |hash_tree_offset| after we have
+ // already read all the data blocks that the hash tree covers. The same
+ // applies to FEC.
+
+ size_t bytes_to_read = std::min(static_cast<uint64_t>(buffer_.size()),
+ filesystem_data_end_ - offset_);
+ if (!bytes_to_read) {
+ ReadVerityAndFooter();
+ return;
+ }
+ partition_fd_->Seek(offset_, SEEK_SET);
+ auto bytes_read = partition_fd_->Read(buffer_.data(), bytes_to_read);
+ if (bytes_read < 0) {
+ LOG(ERROR) << "Unable to schedule an asynchronous read from the stream. "
+ << bytes_read;
+ Cleanup(ErrorCode::kError);
+ } else {
+    // We could just invoke |OnReadDoneCallback()| directly; it works. But
+    // |PostTask| is used so that users can cancel updates.
+ pending_task_id_ = brillo::MessageLoop::current()->PostTask(
+ base::Bind(&FilesystemVerifierAction::OnReadDone,
+ base::Unretained(this),
+ bytes_read));
}
}
-bool FilesystemVerifierAction::ShouldWriteVerity() {
- const InstallPlan::Partition& partition =
- install_plan_.partitions[partition_index_];
- return verifier_step_ == VerifierStep::kVerifyTargetHash &&
- install_plan_.write_verity &&
- (partition.hash_tree_size > 0 || partition.fec_size > 0);
+void FilesystemVerifierAction::OnReadDone(size_t bytes_read) {
+ if (cancelled_) {
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+ if (bytes_read == 0) {
+ LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
+ << " bytes from partition "
+ << install_plan_.partitions[partition_index_].name;
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+
+ if (!hasher_->Update(buffer_.data(), bytes_read)) {
+ LOG(ERROR) << "Unable to update the hash.";
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+
+  // We don't consider the sizes of each partition. Every partition
+  // has the same length on the progress bar.
+  // TODO(zhangkelvin) Take sizes of each partition into account.
+
+ UpdateProgress(
+ (static_cast<double>(offset_) / partition_size_ + partition_index_) /
+ install_plan_.partitions.size());
+ if (ShouldWriteVerity()) {
+ if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
+ LOG(ERROR) << "Unable to update verity";
+ Cleanup(ErrorCode::kVerityCalculationError);
+ return;
+ }
+ }
+
+ offset_ += bytes_read;
+ if (offset_ == filesystem_data_end_) {
+ ReadVerityAndFooter();
+ return;
+ }
+
+ ScheduleFileSystemRead();
}
void FilesystemVerifierAction::FinishPartitionHashing() {
@@ -494,7 +447,6 @@ void FilesystemVerifierAction::FinishPartitionHashing() {
hasher_.reset();
buffer_.clear();
if (partition_fd_) {
- partition_fd_->Close();
partition_fd_.reset();
}
StartPartitionHashing();
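The restored code above drives hashing as a chained loop (ScheduleFileSystemRead() posts OnReadDone() for each chunk, and ReadVerityAndFooter() handles the hashtree/FEC tail), while UpdateProgress() gives every partition an equal share of the progress bar. A small standalone example of that progress formula with made-up numbers:

    #include <cstdio>

    int main() {
      // Hypothetical values: the third of four partitions is half hashed.
      const double offset = 512.0 * 1024;           // bytes hashed so far
      const double partition_size = 1024.0 * 1024;  // size of current partition
      const int partition_index = 2;                // zero-based index
      const int num_partitions = 4;

      // Same shape as the UpdateProgress() call in OnReadDone() above.
      const double progress =
          (offset / partition_size + partition_index) / num_partitions;
      std::printf("overall progress = %.3f\n", progress);  // prints 0.625
      return 0;
    }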
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 850abdad..78634cb3 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -22,14 +22,12 @@
#include <memory>
#include <string>
-#include <utility>
#include <vector>
#include <brillo/message_loops/message_loop.h>
#include "update_engine/common/action.h"
#include "update_engine/common/hash_calculator.h"
-#include "update_engine/common/scoped_task_id.h"
#include "update_engine/payload_consumer/file_descriptor.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/verity_writer_interface.h"
@@ -86,16 +84,6 @@ class FilesystemVerifierAction : public InstallPlanAction {
private:
friend class FilesystemVerifierActionTestDelegate;
- void WriteVerityAndHashPartition(FileDescriptorPtr fd,
- const off64_t start_offset,
- const off64_t end_offset,
- void* buffer,
- const size_t buffer_size);
- void HashPartition(FileDescriptorPtr fd,
- const off64_t start_offset,
- const off64_t end_offset,
- void* buffer,
- const size_t buffer_size);
// Return true if we need to write verity bytes.
bool ShouldWriteVerity();
@@ -103,11 +91,16 @@ class FilesystemVerifierAction : public InstallPlanAction {
// remaining to be hashed, it finishes the action.
void StartPartitionHashing();
- const std::string& GetPartitionPath() const;
+ // Schedules the asynchronous read of the filesystem part of this
+  // partition (not including hashtree/verity).
+ void ScheduleFileSystemRead();
- bool IsVABC(const InstallPlan::Partition& partition) const;
+  // Read the verity part of this partition (hash tree and FEC).
+ void ReadVerityAndFooter();
- size_t GetPartitionSize() const;
+ // Called from the main loop when a single read from |src_stream_| succeeds or
+ // fails, calling OnReadDoneCallback() and OnReadErrorCallback() respectively.
+ void OnReadDone(size_t bytes_read);
// When the read is done, finalize the hash checking of the current partition
// and continue checking the next one.
@@ -121,13 +114,9 @@ class FilesystemVerifierAction : public InstallPlanAction {
// Invoke delegate callback to report progress, if delegate is not null
void UpdateProgress(double progress);
- // Updates progress of current partition. |progress| should be in range [0,
- // 1], and it will be scaled appropriately with # of partitions.
- void UpdatePartitionProgress(double progress);
-
// Initialize read_fd_ and write_fd_
bool InitializeFd(const std::string& part_path);
- bool InitializeFdVABC(bool should_write_verity);
+ bool InitializeFdVABC();
// The type of the partition that we are verifying.
VerifierStep verifier_step_ = VerifierStep::kVerifyTargetHash;
@@ -172,7 +161,8 @@ class FilesystemVerifierAction : public InstallPlanAction {
// Callback that should be cancelled on |TerminateProcessing|. Usually this
// points to pending read callbacks from async stream.
- ScopedTaskId pending_task_id_;
+ brillo::MessageLoop::TaskId pending_task_id_{
+ brillo::MessageLoop::kTaskIdNull};
DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
};
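With ScopedTaskId gone, the header above keeps a raw brillo::MessageLoop::TaskId that the action cancels by hand in TerminateProcessing(). A minimal sketch of that manual lifecycle (it assumes only the libbrillo/libchrome calls already used elsewhere in this diff; FakeMessageLoop stands in for a real loop):

    #include <base/bind.h>
    #include <brillo/message_loops/fake_message_loop.h>
    #include <brillo/message_loops/message_loop.h>
    #include <brillo/message_loops/message_loop_utils.h>

    int main() {
      brillo::FakeMessageLoop loop{nullptr};
      loop.SetAsCurrent();

      // Post a callback and remember its id, as ScheduleFileSystemRead() does.
      brillo::MessageLoop::TaskId pending = brillo::MessageLoop::kTaskIdNull;
      pending = brillo::MessageLoop::current()->PostTask(
          base::Bind([]() { /* the next chunk would be hashed here */ }));

      // Cancel it by hand, as TerminateProcessing() does; the callback never runs.
      brillo::MessageLoop::current()->CancelTask(pending);
      brillo::MessageLoopRunMaxIterations(brillo::MessageLoop::current(), 1);
      return 0;
    }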
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index f2f29547..586662d9 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -16,8 +16,6 @@
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
-#include <algorithm>
-#include <cstring>
#include <memory>
#include <string>
#include <utility>
@@ -27,10 +25,8 @@
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
#include <brillo/secure_blob.h>
-#include <fec/ecc.h>
#include <gtest/gtest.h>
#include <libsnapshot/snapshot_writer.h>
-#include <sys/stat.h>
#include "update_engine/common/dynamic_partition_control_stub.h"
#include "update_engine/common/hash_calculator.h"
@@ -39,7 +35,6 @@
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/fake_file_descriptor.h"
#include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/payload_consumer/verity_writer_android.h"
using brillo::MessageLoop;
using std::string;
@@ -55,46 +50,19 @@ namespace chromeos_update_engine {
class FilesystemVerifierActionTest : public ::testing::Test {
public:
static constexpr size_t BLOCK_SIZE = 4096;
- // We use SHA256 for testing, so hash size is 256bits / 8
- static constexpr size_t HASH_SIZE = 256 / 8;
static constexpr size_t PARTITION_SIZE = BLOCK_SIZE * 1024;
- static constexpr size_t HASH_TREE_START_OFFSET = 800 * BLOCK_SIZE;
- size_t hash_tree_size = 0;
- size_t fec_start_offset = 0;
- size_t fec_data_size = 0;
- static constexpr size_t FEC_ROOTS = 2;
- size_t fec_rounds = 0;
- size_t fec_size = 0;
protected:
void SetUp() override {
- hash_tree_size = HashTreeBuilder::CalculateSize(
- HASH_TREE_START_OFFSET, BLOCK_SIZE, HASH_SIZE);
- fec_start_offset = HASH_TREE_START_OFFSET + hash_tree_size;
- fec_data_size = fec_start_offset;
- static constexpr size_t FEC_ROOTS = 2;
- fec_rounds =
- utils::DivRoundUp(fec_data_size / BLOCK_SIZE, FEC_RSM - FEC_ROOTS);
- fec_size = fec_rounds * FEC_ROOTS * BLOCK_SIZE;
-
- fec_data_.resize(fec_size);
- hash_tree_data_.resize(hash_tree_size);
- // Globally readable writable, as we want to write data
- ASSERT_EQ(0, fchmod(source_part_.fd(), 0666))
- << " Failed to set " << source_part_.path() << " as writable "
- << strerror(errno);
- ASSERT_EQ(0, fchmod(target_part_.fd(), 0666))
- << " Failed to set " << target_part_.path() << " as writable "
- << strerror(errno);
brillo::Blob part_data(PARTITION_SIZE);
test_utils::FillWithData(&part_data);
ASSERT_TRUE(utils::WriteFile(
- source_part_.path().c_str(), part_data.data(), part_data.size()));
- // FillWithData() will fill with different data next call. We want
+ source_part.path().c_str(), part_data.data(), part_data.size()));
+  // FillWithData() will fill with different data next call. We want
// source/target partitions to contain different data for testing.
test_utils::FillWithData(&part_data);
ASSERT_TRUE(utils::WriteFile(
- target_part_.path().c_str(), part_data.data(), part_data.size()));
+ target_part.path().c_str(), part_data.data(), part_data.size()));
loop_.SetAsCurrent();
}
@@ -102,8 +70,6 @@ class FilesystemVerifierActionTest : public ::testing::Test {
EXPECT_EQ(0, brillo::MessageLoopRunMaxIterations(&loop_, 1));
}
- void DoTestVABC(bool clear_target_hash, bool enable_verity);
-
// Returns true iff test has completed successfully.
bool DoTest(bool terminate_early, bool hash_fail);
@@ -115,105 +81,29 @@ class FilesystemVerifierActionTest : public ::testing::Test {
std::string name = "fake_part") {
InstallPlan::Partition& part = install_plan->partitions.emplace_back();
part.name = name;
- part.target_path = target_part_.path();
+ part.target_path = target_part.path();
part.readonly_target_path = part.target_path;
part.target_size = PARTITION_SIZE;
part.block_size = BLOCK_SIZE;
- part.source_path = source_part_.path();
- part.source_size = PARTITION_SIZE;
+ part.source_path = source_part.path();
EXPECT_TRUE(
- HashCalculator::RawHashOfFile(source_part_.path(), &part.source_hash));
+ HashCalculator::RawHashOfFile(source_part.path(), &part.source_hash));
EXPECT_TRUE(
- HashCalculator::RawHashOfFile(target_part_.path(), &part.target_hash));
+ HashCalculator::RawHashOfFile(target_part.path(), &part.target_hash));
return &part;
}
- static void ZeroRange(FileDescriptorPtr fd,
- size_t start_block,
- size_t num_blocks) {
- std::vector<unsigned char> buffer(BLOCK_SIZE);
- ASSERT_EQ((ssize_t)(start_block * BLOCK_SIZE),
- fd->Seek(start_block * BLOCK_SIZE, SEEK_SET));
- for (size_t i = 0; i < num_blocks; i++) {
- ASSERT_TRUE(utils::WriteAll(fd, buffer.data(), buffer.size()));
- }
- }
-
- void SetHashWithVerity(InstallPlan::Partition* partition) {
- partition->hash_tree_algorithm = "sha256";
- partition->hash_tree_size = hash_tree_size;
- partition->hash_tree_offset = HASH_TREE_START_OFFSET;
- partition->hash_tree_data_offset = 0;
- partition->hash_tree_data_size = HASH_TREE_START_OFFSET;
- partition->fec_size = fec_size;
- partition->fec_offset = fec_start_offset;
- partition->fec_data_offset = 0;
- partition->fec_data_size = fec_data_size;
- partition->fec_roots = FEC_ROOTS;
- VerityWriterAndroid verity_writer;
- ASSERT_TRUE(verity_writer.Init(*partition));
- LOG(INFO) << "Opening " << partition->readonly_target_path;
- auto fd = std::make_shared<EintrSafeFileDescriptor>();
- ASSERT_TRUE(fd->Open(partition->readonly_target_path.c_str(), O_RDWR))
- << "Failed to open " << partition->target_path.c_str() << " "
- << strerror(errno);
- std::vector<unsigned char> buffer(BLOCK_SIZE);
- // Only need to read up to hash tree
- auto bytes_to_read = HASH_TREE_START_OFFSET;
- auto offset = 0;
- while (bytes_to_read > 0) {
- const auto bytes_read = fd->Read(
- buffer.data(), std::min<size_t>(buffer.size(), bytes_to_read));
- ASSERT_GT(bytes_read, 0)
- << "offset: " << offset << " bytes to read: " << bytes_to_read
- << " error: " << strerror(errno);
- ASSERT_TRUE(verity_writer.Update(offset, buffer.data(), bytes_read));
- bytes_to_read -= bytes_read;
- offset += bytes_read;
- }
- ASSERT_TRUE(verity_writer.Finalize(fd, fd));
- ASSERT_TRUE(fd->IsOpen());
- ASSERT_TRUE(HashCalculator::RawHashOfFile(target_part_.path(),
- &partition->target_hash));
-
- ASSERT_TRUE(fd->Seek(HASH_TREE_START_OFFSET, SEEK_SET));
- ASSERT_EQ(fd->Read(hash_tree_data_.data(), hash_tree_data_.size()),
- static_cast<ssize_t>(hash_tree_data_.size()))
- << "Failed to read hashtree " << strerror(errno);
- ASSERT_TRUE(fd->Seek(fec_start_offset, SEEK_SET));
- ASSERT_EQ(fd->Read(fec_data_.data(), fec_data_.size()),
- static_cast<ssize_t>(fec_data_.size()))
- << "Failed to read FEC " << strerror(errno);
- // Fs verification action is expected to write them, so clear verity data to
- // ensure that they are re-created correctly.
- ZeroRange(
- fd, HASH_TREE_START_OFFSET / BLOCK_SIZE, hash_tree_size / BLOCK_SIZE);
- ZeroRange(fd, fec_start_offset / BLOCK_SIZE, fec_size / BLOCK_SIZE);
- }
brillo::FakeMessageLoop loop_{nullptr};
ActionProcessor processor_;
DynamicPartitionControlStub dynamic_control_stub_;
- std::vector<unsigned char> fec_data_;
- std::vector<unsigned char> hash_tree_data_;
- static ScopedTempFile source_part_;
- static ScopedTempFile target_part_;
- InstallPlan install_plan_;
+ static ScopedTempFile source_part;
+ static ScopedTempFile target_part;
};
-ScopedTempFile FilesystemVerifierActionTest::source_part_{
- "source_part.XXXXXX", true, PARTITION_SIZE};
-ScopedTempFile FilesystemVerifierActionTest::target_part_{
- "target_part.XXXXXX", true, PARTITION_SIZE};
-
-static void EnableVABC(MockDynamicPartitionControl* dynamic_control,
- const std::string& part_name) {
- ON_CALL(*dynamic_control, GetDynamicPartitionsFeatureFlag())
- .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
- ON_CALL(*dynamic_control, UpdateUsesSnapshotCompression())
- .WillByDefault(Return(true));
- ON_CALL(*dynamic_control, IsDynamicPartition(part_name, _))
- .WillByDefault(Return(true));
-}
+ScopedTempFile FilesystemVerifierActionTest::source_part{
+ "source_part.XXXXXX", false, PARTITION_SIZE};
+ScopedTempFile FilesystemVerifierActionTest::target_part{
+ "target_part.XXXXXX", false, PARTITION_SIZE};
class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
public:
@@ -280,8 +170,9 @@ bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
bool success = true;
// Set up the action objects
- install_plan_.source_slot = 0;
- install_plan_.target_slot = 1;
+ InstallPlan install_plan;
+ install_plan.source_slot = 0;
+ install_plan.target_slot = 1;
InstallPlan::Partition part;
part.name = "part";
part.target_size = kLoopFileSize - (hash_fail ? 1 : 0);
@@ -296,19 +187,23 @@ bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
ADD_FAILURE();
success = false;
}
- install_plan_.partitions = {part};
+ install_plan.partitions = {part};
- BuildActions(install_plan_);
+ BuildActions(install_plan);
FilesystemVerifierActionTestDelegate delegate;
processor_.set_delegate(&delegate);
- loop_.PostTask(base::Bind(&ActionProcessor::StartProcessing,
- base::Unretained(&processor_)));
- if (terminate_early) {
- loop_.PostTask(base::Bind(&ActionProcessor::StopProcessing,
- base::Unretained(&processor_)));
- }
+ loop_.PostTask(FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor, bool terminate_early) {
+ processor->StartProcessing();
+ if (terminate_early) {
+ processor->StopProcessing();
+ }
+ },
+ base::Unretained(&processor_),
+ terminate_early));
loop_.Run();
if (!terminate_early) {
@@ -337,7 +232,7 @@ bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
EXPECT_TRUE(is_a_file_reading_eq);
success = success && is_a_file_reading_eq;
- bool is_install_plan_eq = (*delegate.install_plan_ == install_plan_);
+ bool is_install_plan_eq = (*delegate.install_plan_ == install_plan);
EXPECT_TRUE(is_install_plan_eq);
success = success && is_install_plan_eq;
return success;
@@ -402,13 +297,14 @@ TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
}
TEST_F(FilesystemVerifierActionTest, NonExistentDriveTest) {
+ InstallPlan install_plan;
InstallPlan::Partition part;
part.name = "nope";
part.source_path = "/no/such/file";
part.target_path = "/no/such/file";
- install_plan_.partitions = {part};
+ install_plan.partitions = {part};
- BuildActions(install_plan_);
+ BuildActions(install_plan);
FilesystemVerifierActionTest2Delegate delegate;
processor_.set_delegate(&delegate);
@@ -449,6 +345,7 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) {
test_utils::ScopedLoopbackDeviceBinder target_device(
part_file.path(), true, &target_path);
+ InstallPlan install_plan;
InstallPlan::Partition part;
part.name = "part";
part.target_path = target_path;
@@ -479,9 +376,9 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) {
part.hash_tree_salt = {0x9e, 0xcb, 0xf8, 0xd5, 0x0b, 0xb4, 0x43,
0x0a, 0x7a, 0x10, 0xad, 0x96, 0xd7, 0x15,
0x70, 0xba, 0xed, 0x27, 0xe2, 0xae};
- install_plan_.partitions = {part};
+ install_plan.partitions = {part};
- BuildActions(install_plan_);
+ BuildActions(install_plan);
FilesystemVerifierActionTestDelegate delegate;
processor_.set_delegate(&delegate);
@@ -510,7 +407,8 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
test_utils::ScopedLoopbackDeviceBinder target_device(
part_file.path(), true, &target_path);
- install_plan_.write_verity = false;
+ InstallPlan install_plan;
+ install_plan.write_verity = false;
InstallPlan::Partition part;
part.name = "part";
part.target_path = target_path;
@@ -525,9 +423,9 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
part.fec_offset = part.fec_data_size;
part.fec_size = 2 * 4096;
EXPECT_TRUE(HashCalculator::RawHashOfData(part_data, &part.target_hash));
- install_plan_.partitions = {part};
+ install_plan.partitions = {part};
- BuildActions(install_plan_);
+ BuildActions(install_plan);
FilesystemVerifierActionTestDelegate delegate;
processor_.set_delegate(&delegate);
@@ -544,146 +442,76 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
}
-void FilesystemVerifierActionTest::DoTestVABC(bool clear_target_hash,
- bool enable_verity) {
- auto part_ptr = AddFakePartition(&install_plan_);
- if (::testing::Test::HasFailure()) {
- return;
- }
+TEST_F(FilesystemVerifierActionTest, RunWithVABCNoVerity) {
+ InstallPlan install_plan;
+ auto part_ptr = AddFakePartition(&install_plan);
ASSERT_NE(part_ptr, nullptr);
InstallPlan::Partition& part = *part_ptr;
part.target_path = "Shouldn't attempt to open this path";
- if (enable_verity) {
- install_plan_.write_verity = true;
- ASSERT_NO_FATAL_FAILURE(SetHashWithVerity(&part));
- }
- if (clear_target_hash) {
- part.target_hash.clear();
- }
NiceMock<MockDynamicPartitionControl> dynamic_control;
- EnableVABC(&dynamic_control, part.name);
- auto open_cow = [part]() {
- auto cow_fd = std::make_shared<EintrSafeFileDescriptor>();
- EXPECT_TRUE(cow_fd->Open(part.readonly_target_path.c_str(), O_RDWR))
- << "Failed to open part " << part.readonly_target_path
- << strerror(errno);
- return cow_fd;
- };
+ ON_CALL(dynamic_control, GetDynamicPartitionsFeatureFlag())
+ .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+ ON_CALL(dynamic_control, UpdateUsesSnapshotCompression())
+ .WillByDefault(Return(true));
+ ON_CALL(dynamic_control, IsDynamicPartition(part.name, _))
+ .WillByDefault(Return(true));
EXPECT_CALL(dynamic_control, UpdateUsesSnapshotCompression())
.Times(AtLeast(1));
- auto cow_fd = open_cow();
- if (HasFailure()) {
- return;
- }
-
- if (enable_verity) {
- ON_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
- .WillByDefault(open_cow);
- EXPECT_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
- .Times(AtLeast(1));
-
- // fs verification isn't supposed to write to |readonly_target_path|. All
- // writes should go through fd returned by |OpenCowFd|. Therefore we set
- // target part as read-only to make sure.
- ASSERT_EQ(0, chmod(part.readonly_target_path.c_str(), 0444))
- << " Failed to set " << part.readonly_target_path << " as read-only "
- << strerror(errno);
- } else {
- // Since we are not writing verity, we should not attempt to OpenCowFd()
- // reads should go through regular file descriptors on mapped partitions.
- EXPECT_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
- .Times(0);
- EXPECT_CALL(dynamic_control, MapAllPartitions()).Times(AtLeast(1));
- }
+ // Since we are not writing verity, we should not attempt to OpenCowFd()
+ // reads should go through regular file descriptors on mapped partitions.
+ EXPECT_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
+ .Times(0);
+ EXPECT_CALL(dynamic_control, MapAllPartitions()).Times(AtLeast(1));
EXPECT_CALL(dynamic_control, ListDynamicPartitionsForSlot(_, _, _))
.WillRepeatedly(
DoAll(SetArgPointee<2, std::vector<std::string>>({part.name}),
Return(true)));
- BuildActions(install_plan_, &dynamic_control);
+ BuildActions(install_plan, &dynamic_control);
FilesystemVerifierActionTestDelegate delegate;
processor_.set_delegate(&delegate);
- loop_.PostTask(FROM_HERE,
- base::Bind(&ActionProcessor::StartProcessing,
- base::Unretained(&processor_)));
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StartProcessing(); },
+ base::Unretained(&processor_)));
loop_.Run();
ASSERT_FALSE(processor_.IsRunning());
ASSERT_TRUE(delegate.ran());
- if (enable_verity) {
- std::vector<unsigned char> actual_fec(fec_size);
- ssize_t bytes_read = 0;
- ASSERT_TRUE(utils::PReadAll(cow_fd,
- actual_fec.data(),
- actual_fec.size(),
- fec_start_offset,
- &bytes_read));
- ASSERT_EQ(actual_fec, fec_data_);
- std::vector<unsigned char> actual_hash_tree(hash_tree_size);
- ASSERT_TRUE(utils::PReadAll(cow_fd,
- actual_hash_tree.data(),
- actual_hash_tree.size(),
- HASH_TREE_START_OFFSET,
- &bytes_read));
- ASSERT_EQ(actual_hash_tree, hash_tree_data_);
- }
- if (clear_target_hash) {
- ASSERT_EQ(ErrorCode::kNewRootfsVerificationError, delegate.code());
- } else {
- ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
- }
-}
-
-TEST_F(FilesystemVerifierActionTest, VABC_NoVerity_Success) {
- DoTestVABC(false, false);
-}
-
-TEST_F(FilesystemVerifierActionTest, VABC_NoVerity_Target_Mismatch) {
- DoTestVABC(true, false);
-}
-
-TEST_F(FilesystemVerifierActionTest, VABC_Verity_Success) {
- DoTestVABC(false, true);
-}
-
-TEST_F(FilesystemVerifierActionTest, VABC_Verity_ReadAfterWrite) {
- ASSERT_NO_FATAL_FAILURE(DoTestVABC(false, true));
- // Run FS verification again, w/o writing verity. We have seen a bug where
- // attempting to run fs again will cause previously written verity data to be
- // dropped, so cover this scenario.
- ASSERT_GE(install_plan_.partitions.size(), 1UL);
- auto& part = install_plan_.partitions[0];
- install_plan_.write_verity = false;
- part.readonly_target_path = target_part_.path();
- NiceMock<MockDynamicPartitionControl> dynamic_control;
- EnableVABC(&dynamic_control, part.name);
-
- // b/186196758 is only visible if we repeatedely run FS verification w/o
- // writing verity
- for (int i = 0; i < 3; i++) {
- BuildActions(install_plan_, &dynamic_control);
-
- FilesystemVerifierActionTestDelegate delegate;
- processor_.set_delegate(&delegate);
- loop_.PostTask(
- FROM_HERE,
- base::Bind(
- [](ActionProcessor* processor) { processor->StartProcessing(); },
- base::Unretained(&processor_)));
- loop_.Run();
- ASSERT_FALSE(processor_.IsRunning());
- ASSERT_TRUE(delegate.ran());
- ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
- }
+ ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
}
-TEST_F(FilesystemVerifierActionTest, VABC_Verity_Target_Mismatch) {
- DoTestVABC(true, true);
+TEST_F(FilesystemVerifierActionTest, ReadAfterWrite) {
+ ScopedTempFile cow_device_file("cow_device.XXXXXX", true);
+ android::snapshot::CompressedSnapshotWriter snapshot_writer{
+ {.block_size = BLOCK_SIZE}};
+ snapshot_writer.SetCowDevice(android::base::unique_fd{cow_device_file.fd()});
+ snapshot_writer.Initialize();
+ std::vector<unsigned char> buffer;
+ buffer.resize(BLOCK_SIZE);
+ std::fill(buffer.begin(), buffer.end(), 123);
+
+ ASSERT_TRUE(snapshot_writer.AddRawBlocks(0, buffer.data(), buffer.size()));
+ ASSERT_TRUE(snapshot_writer.Finalize());
+ auto cow_reader = snapshot_writer.OpenReader();
+ ASSERT_NE(cow_reader, nullptr);
+ ASSERT_TRUE(snapshot_writer.AddRawBlocks(1, buffer.data(), buffer.size()));
+ ASSERT_TRUE(snapshot_writer.AddRawBlocks(2, buffer.data(), buffer.size()));
+ ASSERT_TRUE(snapshot_writer.Finalize());
+ cow_reader = snapshot_writer.OpenReader();
+ ASSERT_NE(cow_reader, nullptr);
+ std::vector<unsigned char> read_back;
+ read_back.resize(buffer.size());
+ cow_reader->Seek(BLOCK_SIZE, SEEK_SET);
+ const auto bytes_read = cow_reader->Read(read_back.data(), read_back.size());
+ ASSERT_EQ((size_t)(bytes_read), BLOCK_SIZE);
+ ASSERT_EQ(read_back, buffer);
}
} // namespace chromeos_update_engine
diff --git a/stable/Android.bp b/stable/Android.bp
index 1573ebd2..5e54e9a0 100644
--- a/stable/Android.bp
+++ b/stable/Android.bp
@@ -49,7 +49,6 @@ aidl_interface {
],
},
},
- versions: ["1"],
}
// update_engine_stable_client (type: executable)
diff --git a/stable/aidl_api/libupdate_engine_stable/1/.hash b/stable/aidl_api/libupdate_engine_stable/1/.hash
deleted file mode 100644
index f21562a4..00000000
--- a/stable/aidl_api/libupdate_engine_stable/1/.hash
+++ /dev/null
@@ -1 +0,0 @@
-526043ea6cb098d53a9c3e778420e64c4e864d8c
diff --git a/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl b/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl
deleted file mode 100644
index 67db18e9..00000000
--- a/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-///////////////////////////////////////////////////////////////////////////////
-// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
-///////////////////////////////////////////////////////////////////////////////
-
-// This file is a snapshot of an AIDL file. Do not edit it manually. There are
-// two cases:
-// 1). this is a frozen version file - do not edit this in any case.
-// 2). this is a 'current' file. If you make a backwards compatible change to
-// the interface (from the latest frozen version), the build system will
-// prompt you to update this file with `m <name>-update-api`.
-//
-// You must not make a backward incompatible change to any AIDL file built
-// with the aidl_interface module type with versions property set. The module
-// type is used to build AIDL files in a way that they can be used across
-// independently updatable components of the system. If a device is shipped
-// with such a backward incompatible change, it has a high risk of breaking
-// later when a module using the interface is updated, e.g., Mainline modules.
-
-package android.os;
-interface IUpdateEngineStable {
- void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs);
- boolean bind(android.os.IUpdateEngineStableCallback callback);
- boolean unbind(android.os.IUpdateEngineStableCallback callback);
-}
diff --git a/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl b/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl
deleted file mode 100644
index dbca127c..00000000
--- a/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-///////////////////////////////////////////////////////////////////////////////
-// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
-///////////////////////////////////////////////////////////////////////////////
-
-// This file is a snapshot of an AIDL file. Do not edit it manually. There are
-// two cases:
-// 1). this is a frozen version file - do not edit this in any case.
-// 2). this is a 'current' file. If you make a backwards compatible change to
-// the interface (from the latest frozen version), the build system will
-// prompt you to update this file with `m <name>-update-api`.
-//
-// You must not make a backward incompatible change to any AIDL file built
-// with the aidl_interface module type with versions property set. The module
-// type is used to build AIDL files in a way that they can be used across
-// independently updatable components of the system. If a device is shipped
-// with such a backward incompatible change, it has a high risk of breaking
-// later when a module using the interface is updated, e.g., Mainline modules.
-
-package android.os;
-interface IUpdateEngineStableCallback {
- oneway void onStatusUpdate(int status_code, float percentage);
- oneway void onPayloadApplicationComplete(int error_code);
-}