Diffstat (limited to 'neuralnetworks/utils/common/src')
-rw-r--r--  neuralnetworks/utils/common/src/InvalidBurst.cpp             38
-rw-r--r--  neuralnetworks/utils/common/src/InvalidDevice.cpp             9
-rw-r--r--  neuralnetworks/utils/common/src/InvalidPreparedModel.cpp      4
-rw-r--r--  neuralnetworks/utils/common/src/ResilientBuffer.cpp          48
-rw-r--r--  neuralnetworks/utils/common/src/ResilientBurst.cpp          109
-rw-r--r--  neuralnetworks/utils/common/src/ResilientDevice.cpp          31
-rw-r--r--  neuralnetworks/utils/common/src/ResilientPreparedModel.cpp   85
7 files changed, 307 insertions(+), 17 deletions(-)
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
new file mode 100644
index 0000000000..4ca6603eb7
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidBurst.h"
+
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <optional>
+#include <utility>
+
+namespace android::hardware::neuralnetworks::utils {
+
+InvalidBurst::OptionalCacheHold InvalidBurst::cacheMemory(const nn::Memory& /*memory*/) const {
+    return nullptr;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute(
+        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/) const {
+    return NN_ERROR() << "InvalidBurst";
+}
+
+} // namespace android::hardware::neuralnetworks::utils
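
For context: InvalidBurst above is a null object — a placeholder handed out when no working backend exists, so every call fails fast with a descriptive error instead of dereferencing a missing service. A minimal, self-contained sketch of the pattern; Result and Burst here are hypothetical stand-ins for nn::ExecutionResult and nn::IBurst, not the real NNAPI types:

// Minimal null-object sketch. `Result` and `Burst` are simplified stand-ins;
// only the shape of the pattern matches the file above.
#include <iostream>
#include <string>

struct Result {
    bool ok = false;
    std::string error;
};

class Burst {
  public:
    virtual ~Burst() = default;
    virtual Result execute() const = 0;
};

// Analogue of InvalidBurst: always safe to call, never touches a backend,
// and reports a descriptive error on every method.
class InvalidBurst final : public Burst {
  public:
    Result execute() const override { return {false, "InvalidBurst"}; }
};

int main() {
    const InvalidBurst burst;
    const Result result = burst.execute();
    if (!result.ok) {
        std::cout << "execute failed: " << result.error << '\n';  // prints "InvalidBurst"
    }
    return 0;
}

The Resilient* classes below hand these out when an object has already been marked invalid (see configureExecutionBurstInternal in ResilientPreparedModel.cpp), so callers get one uniform error path.
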
diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp
index 535ccb41c7..81bca7fad0 100644
--- a/neuralnetworks/utils/common/src/InvalidDevice.cpp
+++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp
@@ -32,13 +32,14 @@
 namespace android::hardware::neuralnetworks::utils {
 
 InvalidDevice::InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
-                             nn::DeviceType type, std::vector<nn::Extension> extensions,
-                             nn::Capabilities capabilities,
+                             nn::DeviceType type, bool isUpdatable,
+                             std::vector<nn::Extension> extensions, nn::Capabilities capabilities,
                              std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded)
     : kName(std::move(name)),
       kVersionString(std::move(versionString)),
       kFeatureLevel(featureLevel),
       kType(type),
+      kIsUpdatable(isUpdatable),
       kExtensions(std::move(extensions)),
       kCapabilities(std::move(capabilities)),
       kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded) {}
@@ -59,6 +60,10 @@ nn::DeviceType InvalidDevice::getType() const {
     return kType;
 }
 
+bool InvalidDevice::isUpdatable() const {
+    return kIsUpdatable;
+}
+
 const std::vector<nn::Extension>& InvalidDevice::getSupportedExtensions() const {
     return kExtensions;
 }
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index a46f4ac574..9081e1fdd1 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -42,6 +42,10 @@ InvalidPreparedModel::executeFenced(
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
+nn::GeneralResult<nn::SharedBurst> InvalidPreparedModel::configureExecutionBurst() const {
+    return NN_ERROR() << "InvalidPreparedModel";
+}
+
 std::any InvalidPreparedModel::getUnderlyingResource() const {
     return {};
 }
diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
index cf5496ac39..47abbe268f 100644
--- a/neuralnetworks/utils/common/src/ResilientBuffer.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
@@ -20,6 +20,7 @@
#include <android-base/thread_annotations.h>
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <functional>
@@ -29,6 +30,34 @@
#include <vector>
namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientBuffer& resilientBuffer, const FnType& fn)
+        -> decltype(fn(*resilientBuffer.getBuffer())) {
+    auto buffer = resilientBuffer.getBuffer();
+    auto result = fn(*buffer);
+
+    // Immediately return if buffer is not dead.
+    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+        return result;
+    }
+
+    // Attempt recovery and return if it fails.
+    auto maybeBuffer = resilientBuffer.recover(buffer.get());
+    if (!maybeBuffer.has_value()) {
+        const auto& [resultErrorMessage, resultErrorCode] = result.error();
+        const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBuffer.error();
+        return nn::error(resultErrorCode)
+               << resultErrorMessage << ", and failed to recover dead buffer with error "
+               << recoveryErrorCode << ": " << recoveryErrorMessage;
+    }
+    buffer = std::move(maybeBuffer).value();
+
+    return fn(*buffer);
+}
+
+}  // namespace
+
 nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> ResilientBuffer::create(
         Factory makeBuffer) {
@@ -53,9 +82,16 @@ nn::SharedBuffer ResilientBuffer::getBuffer() const {
     std::lock_guard guard(mMutex);
     return mBuffer;
 }
 
-nn::SharedBuffer ResilientBuffer::recover(const nn::IBuffer* /*failingBuffer*/,
-                                          bool /*blocking*/) const {
+nn::GeneralResult<nn::SharedBuffer> ResilientBuffer::recover(
+        const nn::IBuffer* failingBuffer) const {
     std::lock_guard guard(mMutex);
+
+    // Another caller updated the failing buffer.
+    if (mBuffer.get() != failingBuffer) {
+        return mBuffer;
+    }
+
+    mBuffer = NN_TRY(kMakeBuffer());
     return mBuffer;
 }
@@ -64,12 +100,16 @@ nn::Request::MemoryDomainToken ResilientBuffer::getToken() const {
 }
 
 nn::GeneralResult<void> ResilientBuffer::copyTo(const nn::Memory& dst) const {
-    return getBuffer()->copyTo(dst);
+    const auto fn = [&dst](const nn::IBuffer& buffer) { return buffer.copyTo(dst); };
+    return protect(*this, fn);
 }
 
 nn::GeneralResult<void> ResilientBuffer::copyFrom(const nn::Memory& src,
                                                   const nn::Dimensions& dimensions) const {
-    return getBuffer()->copyFrom(src, dimensions);
+    const auto fn = [&src, &dimensions](const nn::IBuffer& buffer) {
+        return buffer.copyFrom(src, dimensions);
+    };
+    return protect(*this, fn);
 }
 
 }  // namespace android::hardware::neuralnetworks::utils
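
The protect() helper added above is the heart of the resilience layer: run the operation once, and only if the failure is DEAD_OBJECT (the remote service died) rebuild the backend and retry exactly once; any other error is returned unchanged, since recreating the object would not help. A self-contained sketch of that control flow, with simplified stand-in types rather than the real nn:: result types, and with the recovery-failure message chaining of the real code omitted:

#include <functional>
#include <iostream>
#include <memory>
#include <optional>

enum class ErrorStatus { NONE, DEAD_OBJECT, GENERAL_FAILURE };

// Hypothetical stand-in for nn::GeneralResult<int>.
struct Result {
    std::optional<int> value;
    ErrorStatus code = ErrorStatus::NONE;
    bool has_value() const { return value.has_value(); }
};

struct Buffer {
    bool dead = false;
    Result copyTo() const {
        if (dead) return {std::nullopt, ErrorStatus::DEAD_OBJECT};
        return {42, ErrorStatus::NONE};
    }
};

// Sketch of the protect() idiom: call fn once; if and only if the backend is
// dead, recover a fresh instance and retry exactly once.
template <typename Fn>
Result protect(std::shared_ptr<Buffer>& buffer,
               const std::function<std::shared_ptr<Buffer>()>& recover, const Fn& fn) {
    Result result = fn(*buffer);
    if (result.has_value() || result.code != ErrorStatus::DEAD_OBJECT) {
        return result;  // success, or a failure that re-creation cannot fix
    }
    buffer = recover();  // swap in a freshly created backend
    return fn(*buffer);  // single retry against the recovered object
}

int main() {
    auto buffer = std::make_shared<Buffer>(Buffer{/*dead=*/true});
    const auto recover = [] { return std::make_shared<Buffer>(); };
    const Result r = protect(buffer, recover, [](const Buffer& b) { return b.copyTo(); });
    std::cout << (r.has_value() ? "recovered, copyTo succeeded" : "copyTo failed") << '\n';
    return 0;
}
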
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
new file mode 100644
index 0000000000..0d3cb33a98
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientBurst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <utility>
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientBurst& resilientBurst, const FnType& fn)
+        -> decltype(fn(*resilientBurst.getBurst())) {
+    auto burst = resilientBurst.getBurst();
+    auto result = fn(*burst);
+
+    // Immediately return if burst is not dead.
+    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+        return result;
+    }
+
+    // Attempt recovery and return if it fails.
+    auto maybeBurst = resilientBurst.recover(burst.get());
+    if (!maybeBurst.has_value()) {
+        auto [resultErrorMessage, resultErrorCode, resultOutputShapes] = std::move(result).error();
+        const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBurst.error();
+        return nn::error(resultErrorCode, std::move(resultOutputShapes))
+               << resultErrorMessage << ", and failed to recover dead burst object with error "
+               << recoveryErrorCode << ": " << recoveryErrorMessage;
+    }
+    burst = std::move(maybeBurst).value();
+
+    return fn(*burst);
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const ResilientBurst>> ResilientBurst::create(Factory makeBurst) {
+    if (makeBurst == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::ResilientBurst::create must have non-empty makeBurst";
+    }
+    auto burst = NN_TRY(makeBurst());
+    CHECK(burst != nullptr);
+    return std::make_shared<ResilientBurst>(PrivateConstructorTag{}, std::move(makeBurst),
+                                            std::move(burst));
+}
+
+ResilientBurst::ResilientBurst(PrivateConstructorTag /*tag*/, Factory makeBurst,
+                               nn::SharedBurst burst)
+    : kMakeBurst(std::move(makeBurst)), mBurst(std::move(burst)) {
+    CHECK(kMakeBurst != nullptr);
+    CHECK(mBurst != nullptr);
+}
+
+nn::SharedBurst ResilientBurst::getBurst() const {
+    std::lock_guard guard(mMutex);
+    return mBurst;
+}
+
+nn::GeneralResult<nn::SharedBurst> ResilientBurst::recover(const nn::IBurst* failingBurst) const {
+    std::lock_guard guard(mMutex);
+
+    // Another caller updated the failing burst.
+    if (mBurst.get() != failingBurst) {
+        return mBurst;
+    }
+
+    mBurst = NN_TRY(kMakeBurst());
+    return mBurst;
+}
+
+ResilientBurst::OptionalCacheHold ResilientBurst::cacheMemory(const nn::Memory& memory) const {
+    return getBurst()->cacheMemory(memory);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute(
+        const nn::Request& request, nn::MeasureTiming measure) const {
+    const auto fn = [&request, measure](const nn::IBurst& burst) {
+        return burst.execute(request, measure);
+    };
+    return protect(*this, fn);
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
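
ResilientBurst::recover() above shows the other half of the scheme: recovery is serialized behind a mutex, and the failing pointer is compared against the cached one, so when several threads hit DEAD_OBJECT at once only the first rebuilds the burst and the rest pick up the replacement. A simplified, self-contained sketch of that double-checked replacement; Burst and the factory type are stand-ins, and the fallible nn::GeneralResult plumbing is omitted:

#include <functional>
#include <iostream>
#include <memory>
#include <mutex>

struct Burst {};  // stand-in for nn::IBurst

class ResilientBurst {
  public:
    using Factory = std::function<std::shared_ptr<Burst>()>;

    ResilientBurst(Factory makeBurst, std::shared_ptr<Burst> burst)
        : kMakeBurst(std::move(makeBurst)), mBurst(std::move(burst)) {}

    std::shared_ptr<Burst> getBurst() const {
        std::lock_guard guard(mMutex);
        return mBurst;
    }

    // Double-checked replacement: only a caller still holding the dead pointer
    // pays for re-creation; anyone arriving later sees the pointer already
    // changed and reuses the replacement.
    std::shared_ptr<Burst> recover(const Burst* failingBurst) const {
        std::lock_guard guard(mMutex);
        if (mBurst.get() != failingBurst) {
            return mBurst;  // another thread already replaced the dead burst
        }
        mBurst = kMakeBurst();
        return mBurst;
    }

  private:
    const Factory kMakeBurst;
    mutable std::mutex mMutex;
    mutable std::shared_ptr<Burst> mBurst;  // guarded by mMutex
};

int main() {
    const auto factory = [] { return std::make_shared<Burst>(); };
    ResilientBurst resilient(factory, factory());
    const auto stale = resilient.getBurst();
    const auto fresh = resilient.recover(stale.get());  // pointers match, so it rebuilds
    std::cout << (fresh != stale ? "burst was re-created" : "burst reused") << '\n';
    return 0;
}
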
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 6ad3fadee6..13965afd37 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -122,12 +122,14 @@ nn::GeneralResult<nn::SharedDevice> ResilientDevice::recover(const nn::IDevice*
     };
     if (compare(&IDevice::getName) || compare(&IDevice::getVersionString) ||
         compare(&IDevice::getFeatureLevel) || compare(&IDevice::getType) ||
-        compare(&IDevice::getSupportedExtensions) || compare(&IDevice::getCapabilities)) {
+        compare(&IDevice::isUpdatable) || compare(&IDevice::getSupportedExtensions) ||
+        compare(&IDevice::getCapabilities)) {
         LOG(ERROR) << "Recovered device has different metadata than what is cached. Marking "
                       "IDevice object as invalid.";
         device = std::make_shared<const InvalidDevice>(
-                kName, kVersionString, mDevice->getFeatureLevel(), mDevice->getType(), kExtensions,
-                kCapabilities, mDevice->getNumberOfCacheFilesNeeded());
+                kName, kVersionString, mDevice->getFeatureLevel(), mDevice->getType(),
+                mDevice->isUpdatable(), kExtensions, kCapabilities,
+                mDevice->getNumberOfCacheFilesNeeded());
         mIsValid = false;
     }
@@ -151,6 +153,10 @@ nn::DeviceType ResilientDevice::getType() const {
     return getDevice()->getType();
 }
 
+bool ResilientDevice::isUpdatable() const {
+    return getDevice()->isUpdatable();
+}
+
 const std::vector<nn::Extension>& ResilientDevice::getSupportedExtensions() const {
     return kExtensions;
 }
@@ -180,6 +186,7 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+#if 0
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model,
                                                          preference, priority, deadline, modelCache,
                                                          dataCache, token] {
         return device->prepareModelInternal(model, preference, priority, deadline, modelCache,
                                             dataCache, token);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
+#else
+    return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache,
+                                token);
+#endif
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+#if 0
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), deadline,
                                                          modelCache, dataCache, token] {
         return device->prepareModelFromCacheInternal(deadline, modelCache, dataCache, token);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
+#else
+    return prepareModelFromCacheInternal(deadline, modelCache, dataCache, token);
+#endif
 }
 
 nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocate(
         const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
         const std::vector<nn::BufferRole>& inputRoles,
         const std::vector<nn::BufferRole>& outputRoles) const {
+#if 0
    auto self = shared_from_this();
     ResilientBuffer::Factory makeBuffer = [device = std::move(self), desc, preparedModels,
                                            inputRoles, outputRoles] {
         return device->allocateInternal(desc, preparedModels, inputRoles, outputRoles);
     };
     return ResilientBuffer::create(std::move(makeBuffer));
+#else
+    return allocateInternal(desc, preparedModels, inputRoles, outputRoles);
+#endif
 }
 
 bool ResilientDevice::isValidInternal() const {
@@ -225,8 +244,8 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
     }
-    const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
-                     token](const nn::IDevice& device) {
+    const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache,
+                     &token](const nn::IDevice& device) {
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
                                    token);
     };
@@ -239,7 +258,7 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCach
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
     }
-    const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
+    const auto fn = [&deadline, &modelCache, &dataCache, &token](const nn::IDevice& device) {
         return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
     };
     return protect(*this, fn, /*blocking=*/false);
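
The recover() hunk at the top of this file adds isUpdatable to the post-recovery metadata checks: after a dead device is re-created, each cached attribute is compared against the recovered device through a member-function pointer, and any mismatch marks the device invalid rather than silently handing out a different device. A reduced sketch of that comparison style; the hypothetical Device below uses plain data members in place of the real IDevice getters:

#include <iostream>
#include <string>

// Hypothetical, much-reduced stand-in for nn::IDevice's metadata surface.
struct Device {
    std::string name;
    int featureLevel = 0;
    bool updatable = false;
};

// Mirrors the `compare(&IDevice::getX)` idiom with pointers-to-members:
// one template covers every attribute instead of one comparison per getter.
template <typename T>
bool differs(const Device& cached, const Device& recovered, T Device::*member) {
    return cached.*member != recovered.*member;
}

int main() {
    const Device cached{"example-driver", 30, false};
    const Device recovered{"example-driver", 30, true};  // e.g. driver updated underneath us
    const bool invalid = differs(cached, recovered, &Device::name) ||
                         differs(cached, recovered, &Device::featureLevel) ||
                         differs(cached, recovered, &Device::updatable);
    std::cout << (invalid ? "metadata changed: mark IDevice invalid" : "device ok") << '\n';
    return 0;
}
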
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index b8acee16c9..5dd5f99f5f 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -16,19 +16,52 @@
#include "ResilientPreparedModel.h"
+#include "InvalidBurst.h"
+#include "ResilientBurst.h"
+
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <functional>
#include <memory>
#include <mutex>
+#include <sstream>
#include <utility>
#include <vector>
namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientPreparedModel& resilientPreparedModel, const FnType& fn)
+        -> decltype(fn(*resilientPreparedModel.getPreparedModel())) {
+    auto preparedModel = resilientPreparedModel.getPreparedModel();
+    auto result = fn(*preparedModel);
+
+    // Immediately return if prepared model is not dead.
+    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+        return result;
+    }
+
+    // Attempt recovery and return if it fails.
+    auto maybePreparedModel = resilientPreparedModel.recover(preparedModel.get());
+    if (!maybePreparedModel.has_value()) {
+        const auto& [message, code] = maybePreparedModel.error();
+        std::ostringstream oss;
+        oss << ", and failed to recover dead prepared model with error " << code << ": " << message;
+        result.error().message += oss.str();
+        return result;
+    }
+    preparedModel = std::move(maybePreparedModel).value();
+
+    return fn(*preparedModel);
+}
+
+}  // namespace
+
 nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> ResilientPreparedModel::create(
         Factory makePreparedModel) {
@@ -55,9 +88,16 @@ nn::SharedPreparedModel ResilientPreparedModel::getPreparedModel() const {
     return mPreparedModel;
 }
 
-nn::SharedPreparedModel ResilientPreparedModel::recover(
-        const nn::IPreparedModel* /*failingPreparedModel*/, bool /*blocking*/) const {
+nn::GeneralResult<nn::SharedPreparedModel> ResilientPreparedModel::recover(
+        const nn::IPreparedModel* failingPreparedModel) const {
     std::lock_guard guard(mMutex);
+
+    // Another caller updated the failing prepared model.
+    if (mPreparedModel.get() != failingPreparedModel) {
+        return mPreparedModel;
+    }
+
+    mPreparedModel = NN_TRY(kMakePreparedModel());
     return mPreparedModel;
 }
@@ -65,7 +105,11 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
                                 const nn::OptionalTimePoint& deadline,
                                 const nn::OptionalDuration& loopTimeoutDuration) const {
-    return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration);
+    const auto fn = [&request, measure, &deadline,
+                     &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.execute(request, measure, deadline, loopTimeoutDuration);
+    };
+    return protect(*this, fn);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -75,12 +119,43 @@ ResilientPreparedModel::executeFenced(const nn::Request& request,
                                       const nn::OptionalTimePoint& deadline,
                                       const nn::OptionalDuration& loopTimeoutDuration,
                                       const nn::OptionalDuration& timeoutDurationAfterFence) const {
-    return getPreparedModel()->executeFenced(request, waitFor, measure, deadline,
-                                             loopTimeoutDuration, timeoutDurationAfterFence);
+    const auto fn = [&request, &waitFor, measure, &deadline, &loopTimeoutDuration,
+                     &timeoutDurationAfterFence](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
+                                           timeoutDurationAfterFence);
+    };
+    return protect(*this, fn);
+}
+
+nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
+#if 0
+    auto self = shared_from_this();
+    ResilientBurst::Factory makeBurst =
+            [preparedModel = std::move(self)]() -> nn::GeneralResult<nn::SharedBurst> {
+        return preparedModel->configureExecutionBurst();
+    };
+    return ResilientBurst::create(std::move(makeBurst));
+#else
+    return configureExecutionBurstInternal();
+#endif
 }
 
 std::any ResilientPreparedModel::getUnderlyingResource() const {
     return getPreparedModel()->getUnderlyingResource();
 }
 
+bool ResilientPreparedModel::isValidInternal() const {
+    return true;
+}
+
+nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurstInternal() const {
+    if (!isValidInternal()) {
+        return std::make_shared<const InvalidBurst>();
+    }
+    const auto fn = [](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.configureExecutionBurst();
+    };
+    return protect(*this, fn);
+}
+
 }  // namespace android::hardware::neuralnetworks::utils
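
One detail worth noting in this file's protect() variant: when recovery itself fails, the recovery error is appended to the original execution error instead of replacing it, so neither diagnostic is lost. A small sketch of that chaining, with plain strings standing in for the nn:: error types:

#include <iostream>
#include <sstream>
#include <string>

// Appends the recovery failure to the original failure, mirroring how the
// ResilientPreparedModel protect() above extends result.error().message.
std::string chainErrors(const std::string& executionError, int recoveryCode,
                        const std::string& recoveryMessage) {
    std::ostringstream oss;
    oss << executionError << ", and failed to recover dead prepared model with error "
        << recoveryCode << ": " << recoveryMessage;
    return oss.str();
}

int main() {
    std::cout << chainErrors("execute failed: DEAD_OBJECT", /*recoveryCode=*/5,
                             "service not registered")
              << '\n';
    return 0;
}
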