path: root/services/incremental/IncrementalService.cpp
author     Scott Lobdell <slobdell@google.com>    2021-03-23 20:33:04 +0000
committer  Scott Lobdell <slobdell@google.com>    2021-03-24 02:40:01 +0000
commit     757dbb836469bbdd7eb8312deaf584fe0c99c17d (patch)
tree       a678b33ad5f0f024d0f942f127b91665f0616193 /services/incremental/IncrementalService.cpp
parent     7710a95746be8dba8c6ffe7172f9c01334a2ca81 (diff)
parent     f022dd1e6827ebf7c52b06aa40f2059a3f0f5cad (diff)
Merge SP1A.210311.001
Change-Id: Id1a205bf3f0609c0b13e4bea377056c3b06299fa
Diffstat (limited to 'services/incremental/IncrementalService.cpp')
-rw-r--r--  services/incremental/IncrementalService.cpp | 125
1 file changed, 106 insertions(+), 19 deletions(-)
diff --git a/services/incremental/IncrementalService.cpp b/services/incremental/IncrementalService.cpp
index 2fa927bcccfd..1fcc2843bd43 100644
--- a/services/incremental/IncrementalService.cpp
+++ b/services/incremental/IncrementalService.cpp
@@ -1617,7 +1617,7 @@ bool IncrementalService::configureNativeBinaries(StorageId storage, std::string_
// Need a shared pointer: will be passing it into all unpacking jobs.
std::shared_ptr<ZipArchive> zipFile(zipFileHandle, [](ZipArchiveHandle h) { CloseArchive(h); });
void* cookie = nullptr;
- const auto libFilePrefix = path::join(constants().libDir, abi) + "/";
+ const auto libFilePrefix = path::join(constants().libDir, abi) += "/";
if (StartIteration(zipFile.get(), &cookie, libFilePrefix, constants().libSuffix)) {
LOG(ERROR) << "Failed to start zip iteration for " << apkFullPath;
return false;
@@ -1627,6 +1627,17 @@ bool IncrementalService::configureNativeBinaries(StorageId storage, std::string_
auto openZipTs = Clock::now();
+ auto mapFiles = (mIncFs->features() & incfs::Features::v2);
+ incfs::FileId sourceId;
+ if (mapFiles) {
+ sourceId = mIncFs->getFileId(ifs->control, apkFullPath);
+ if (!incfs::isValidFileId(sourceId)) {
+ LOG(WARNING) << "Error getting IncFS file ID for apk path '" << apkFullPath
+ << "', mapping disabled";
+ mapFiles = false;
+ }
+ }
+
std::vector<Job> jobQueue;
ZipEntry entry;
std::string_view fileName;
@@ -1635,13 +1646,16 @@ bool IncrementalService::configureNativeBinaries(StorageId storage, std::string_
continue;
}
+ const auto entryUncompressed = entry.method == kCompressStored;
+ const auto entryPageAligned = (entry.offset & (constants().blockSize - 1)) == 0;
+
if (!extractNativeLibs) {
// ensure the file is properly aligned and unpacked
- if (entry.method != kCompressStored) {
+ if (!entryUncompressed) {
LOG(WARNING) << "Library " << fileName << " must be uncompressed to mmap it";
return false;
}
- if ((entry.offset & (constants().blockSize - 1)) != 0) {
+ if (!entryPageAligned) {
LOG(WARNING) << "Library " << fileName
<< " must be page-aligned to mmap it, offset = 0x" << std::hex
<< entry.offset;
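
Note: the page-alignment test above relies on the block size being a power of two, in which case masking with blockSize - 1 yields the offset modulo the block size. A minimal standalone sketch of that check (illustrative only, not part of this change; the 4096-byte block size is an assumption):

#include <cstdint>

// For a power-of-two blockSize, (offset & (blockSize - 1)) == 0
// is equivalent to offset % blockSize == 0.
constexpr bool isBlockAligned(uint64_t offset, uint64_t blockSize) {
    return (offset & (blockSize - 1)) == 0;
}

static_assert(isBlockAligned(8192, 4096), "two full blocks");
static_assert(!isBlockAligned(4100, 4096), "4 bytes past a block boundary");
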
@@ -1665,6 +1679,28 @@ bool IncrementalService::configureNativeBinaries(StorageId storage, std::string_
continue;
}
+ if (mapFiles && entryUncompressed && entryPageAligned && entry.uncompressed_length > 0) {
+ incfs::NewMappedFileParams mappedFileParams = {
+ .sourceId = sourceId,
+ .sourceOffset = entry.offset,
+ .size = entry.uncompressed_length,
+ };
+
+ if (auto res = mIncFs->makeMappedFile(ifs->control, targetLibPathAbsolute, 0755,
+ mappedFileParams);
+ res == 0) {
+ if (perfLoggingEnabled()) {
+ auto doneTs = Clock::now();
+ LOG(INFO) << "incfs: Mapped " << libName << ": "
+ << elapsedMcs(startFileTs, doneTs) << "mcs";
+ }
+ continue;
+ } else {
+ LOG(WARNING) << "Failed to map file for: '" << targetLibPath << "' errno: " << res
+ << "; falling back to full extraction";
+ }
+ }
+
// Create new lib file without signature info
incfs::NewFileParams libFileParams = {
.size = entry.uncompressed_length,
@@ -1673,7 +1709,7 @@ bool IncrementalService::configureNativeBinaries(StorageId storage, std::string_
.metadata = {targetLibPath.c_str(), (IncFsSize)targetLibPath.size()},
};
incfs::FileId libFileId = idFromMetadata(targetLibPath);
- if (auto res = mIncFs->makeFile(ifs->control, targetLibPathAbsolute, 0777, libFileId,
+ if (auto res = mIncFs->makeFile(ifs->control, targetLibPathAbsolute, 0755, libFileId,
libFileParams)) {
LOG(ERROR) << "Failed to make file for: " << targetLibPath << " errno: " << res;
// If one lib file fails to be created, abort others as well
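
Note: the mode arguments in this hunk are octal permission bits, so the switch from 0777 to 0755 drops write permission for group and others. An illustrative check (not part of the change):

#include <sys/stat.h>

// 0755 == rwxr-xr-x, 0777 == rwxrwxrwx
static_assert(0755 == (S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH), "rwxr-xr-x");
static_assert(0777 == (S_IRWXU | S_IRWXG | S_IRWXO), "rwxrwxrwx");
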
@@ -1900,25 +1936,33 @@ IncrementalService::LoadingProgress IncrementalService::getLoadingProgress(
}
IncrementalService::LoadingProgress IncrementalService::getLoadingProgressFromPath(
- const IncFsMount& ifs, std::string_view storagePath, bool stopOnFirstIncomplete) const {
- ssize_t totalBlocks = 0, filledBlocks = 0;
- const auto filePaths = mFs->listFilesRecursive(storagePath);
- for (const auto& filePath : filePaths) {
+ const IncFsMount& ifs, std::string_view storagePath,
+ const bool stopOnFirstIncomplete) const {
+ ssize_t totalBlocks = 0, filledBlocks = 0, error = 0;
+ mFs->listFilesRecursive(storagePath, [&, this](auto filePath) {
const auto [filledBlocksCount, totalBlocksCount] =
mIncFs->countFilledBlocks(ifs.control, filePath);
+ if (filledBlocksCount == -EOPNOTSUPP || filledBlocksCount == -ENOTSUP ||
+ filledBlocksCount == -ENOENT) {
+ // a kind of a file that's not really being loaded, e.g. a mapped range
+ // an older IncFS used to return ENOENT in this case, so handle it the same way
+ return true;
+ }
if (filledBlocksCount < 0) {
LOG(ERROR) << "getLoadingProgress failed to get filled blocks count for: " << filePath
<< " errno: " << filledBlocksCount;
- return {filledBlocksCount, filledBlocksCount};
+ error = filledBlocksCount;
+ return false;
}
totalBlocks += totalBlocksCount;
filledBlocks += filledBlocksCount;
if (stopOnFirstIncomplete && filledBlocks < totalBlocks) {
- break;
+ return false;
}
- }
+ return true;
+ });
- return {filledBlocks, totalBlocks};
+ return error ? LoadingProgress{error, error} : LoadingProgress{filledBlocks, totalBlocks};
}
bool IncrementalService::updateLoadingProgress(
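
Note: the reworked function keeps the convention that a negative, errno-style value in both fields of LoadingProgress signals failure. A standalone sketch of how a caller might interpret the pair (illustrative only; getProgressPercent and the local struct are hypothetical, with field names assumed from the initializers above):

#include <optional>
#include <sys/types.h>

struct LoadingProgress {
    ssize_t filledBlocks;
    ssize_t totalBlocks;
};

std::optional<double> getProgressPercent(const LoadingProgress& progress) {
    if (progress.filledBlocks < 0 || progress.totalBlocks < 0) {
        return std::nullopt;  // both fields carry the negative errno on failure
    }
    if (progress.totalBlocks == 0) {
        return 100.0;  // nothing left to load, e.g. every entry was a mapped range
    }
    return 100.0 * static_cast<double>(progress.filledBlocks) /
            static_cast<double>(progress.totalBlocks);
}
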
@@ -2074,6 +2118,29 @@ bool IncrementalService::removeTimedJobs(TimedQueueWrapper& timedQueue, MountId
return true;
}
+void IncrementalService::getMetrics(StorageId storageId, android::os::PersistableBundle* result) {
+ const auto duration = getMillsSinceOldestPendingRead(storageId);
+ if (duration >= 0) {
+ const auto kMetricsMillisSinceOldestPendingRead =
+ os::incremental::BnIncrementalService::METRICS_MILLIS_SINCE_OLDEST_PENDING_READ();
+ result->putLong(String16(kMetricsMillisSinceOldestPendingRead.data()), duration);
+ }
+}
+
+long IncrementalService::getMillsSinceOldestPendingRead(StorageId storageId) {
+ std::unique_lock l(mLock);
+ const auto ifs = getIfsLocked(storageId);
+ if (!ifs) {
+ LOG(ERROR) << "getMillsSinceOldestPendingRead failed, invalid storageId: " << storageId;
+ return -EINVAL;
+ }
+ if (!ifs->dataLoaderStub) {
+ LOG(ERROR) << "getMillsSinceOldestPendingRead failed, no data loader: " << storageId;
+ return -EINVAL;
+ }
+ return ifs->dataLoaderStub->elapsedMsSinceOldestPendingRead();
+}
+
IncrementalService::DataLoaderStub::DataLoaderStub(IncrementalService& service, MountId id,
DataLoaderParamsParcel&& params,
FileSystemControlParcel&& control,
@@ -2472,9 +2539,7 @@ void IncrementalService::DataLoaderStub::updateHealthStatus(bool baseline) {
std::max(1000ms,
std::chrono::milliseconds(mHealthCheckParams.unhealthyMonitoringMs));
- const auto kernelDeltaUs = kernelTsUs - mHealthBase.kernelTsUs;
- const auto userTs = mHealthBase.userTs + std::chrono::microseconds(kernelDeltaUs);
- const auto delta = std::chrono::duration_cast<std::chrono::milliseconds>(now - userTs);
+ const auto delta = elapsedMsSinceKernelTs(now, kernelTsUs);
Milliseconds checkBackAfter;
if (delta + kTolerance < blockedTimeout) {
@@ -2506,6 +2571,13 @@ void IncrementalService::DataLoaderStub::updateHealthStatus(bool baseline) {
fsmStep();
}
+Milliseconds IncrementalService::DataLoaderStub::elapsedMsSinceKernelTs(TimePoint now,
+ BootClockTsUs kernelTsUs) {
+ const auto kernelDeltaUs = kernelTsUs - mHealthBase.kernelTsUs;
+ const auto userTs = mHealthBase.userTs + std::chrono::microseconds(kernelDeltaUs);
+ return std::chrono::duration_cast<Milliseconds>(now - userTs);
+}
+
const incfs::UniqueControl& IncrementalService::DataLoaderStub::initializeHealthControl() {
if (mHealthPath.empty()) {
resetHealthControl();
@@ -2537,16 +2609,15 @@ BootClockTsUs IncrementalService::DataLoaderStub::getOldestPendingReadTs() {
if (mService.mIncFs->waitForPendingReads(control, 0ms, &mLastPendingReads) !=
android::incfs::WaitResult::HaveData ||
mLastPendingReads.empty()) {
+ // Clear previous pending reads
+ mLastPendingReads.clear();
return result;
}
LOG(DEBUG) << id() << ": pendingReads: " << control.pendingReads() << ", "
<< mLastPendingReads.size() << ": " << mLastPendingReads.front().bootClockTsUs;
- for (auto&& pendingRead : mLastPendingReads) {
- result = std::min(result, pendingRead.bootClockTsUs);
- }
- return result;
+ return getOldestTsFromLastPendingReads();
}
void IncrementalService::DataLoaderStub::registerForPendingReads() {
@@ -2568,6 +2639,22 @@ void IncrementalService::DataLoaderStub::registerForPendingReads() {
mService.mLooper->wake();
}
+BootClockTsUs IncrementalService::DataLoaderStub::getOldestTsFromLastPendingReads() {
+ auto result = kMaxBootClockTsUs;
+ for (auto&& pendingRead : mLastPendingReads) {
+ result = std::min(result, pendingRead.bootClockTsUs);
+ }
+ return result;
+}
+
+long IncrementalService::DataLoaderStub::elapsedMsSinceOldestPendingRead() {
+ const auto oldestPendingReadKernelTs = getOldestTsFromLastPendingReads();
+ if (oldestPendingReadKernelTs == kMaxBootClockTsUs) {
+ return 0;
+ }
+ return elapsedMsSinceKernelTs(Clock::now(), oldestPendingReadKernelTs).count();
+}
+
void IncrementalService::DataLoaderStub::unregisterFromPendingReads() {
const auto pendingReadsFd = mHealthControl.pendingReads();
if (pendingReadsFd < 0) {