author    Michael Butler <butlermichael@google.com>  2020-11-22 20:25:34 -0800
committer Michael Butler <butlermichael@google.com>  2020-12-07 22:58:39 +0000
commit    61f508e018cff89c93a7e08b3dbde1eb6a2155df (patch)
tree      a2f8548dad0e6895417427fcfca614ccf2d857c5
parent    cb3498d91b3a666b852850781538f8f228a549db (diff)
Cleanup how transport errors are handled in NN utils
Prior to this change, whenever the NN utility code encountered a HIDL transport error, the error message would display the file and line number of the "handleTransportError" function itself. This change introduces a new macro "HANDLE_TRANSPORT_FAILURE" that handles the transport error in the same way, but the error message now displays the file and line number of the macro's call site.

Bug: N/A
Test: mma
Change-Id: I35b34f8f5be52b7fcff0fbb58a37ab2b8c7dd8bb
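For illustration, here is a minimal, self-contained sketch of why the macro changes what the error points at. The Error/Result types below are toy stand-ins (not the real NN_ERROR/nn::GeneralResult), and the macro uses a GNU statement expression (a GCC/Clang extension), just as the actual HANDLE_TRANSPORT_FAILURE in this patch does:

#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Toy stand-ins for the NNAPI error/result types; illustrative only.
struct Error {
    std::string file;
    int line;
    std::string message;
};

template <typename T>
struct Result {
    std::optional<T> value;
    std::optional<Error> error;
    bool has_value() const { return value.has_value(); }
};

// Old shape: the helper constructs the error itself, so __FILE__/__LINE__
// always name this helper, regardless of which caller hit the failure.
Result<int> handleTransportError(bool ok, int v) {
    if (!ok) return {std::nullopt, Error{__FILE__, __LINE__, "transport failed"}};
    return {v, std::nullopt};
}

// New shape: a statement expression that, on failure, returns from the
// *enclosing* function. Macro bodies expand at the invocation line, so
// __FILE__/__LINE__ now identify the call site. (Hardcoded to Result<int>
// to keep the sketch short.)
#define HANDLE_TRANSPORT_FAILURE(expr)                                     \
    ({                                                                     \
        auto result_ = (expr);                                             \
        if (!result_.has_value()) {                                        \
            return Result<int>{std::nullopt,                               \
                               Error{__FILE__, __LINE__,                   \
                                     std::move(result_.error->message)}};  \
        }                                                                  \
        std::move(*result_.value);                                         \
    })

Result<int> caller() {
    const int status = HANDLE_TRANSPORT_FAILURE(handleTransportError(false, 42));
    return {status, std::nullopt};
}

int main() {
    const auto result = caller();
    if (!result.has_value()) {
        // Prints caller()'s file:line rather than handleTransportError()'s.
        std::cout << result.error->file << ":" << result.error->line << ": "
                  << result.error->message << "\n";
    }
}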
-rw-r--r-- neuralnetworks/1.0/utils/src/Device.cpp | 9
-rw-r--r-- neuralnetworks/1.0/utils/src/PreparedModel.cpp | 3
-rw-r--r-- neuralnetworks/1.1/utils/src/Device.cpp | 9
-rw-r--r-- neuralnetworks/1.2/utils/src/Device.cpp | 19
-rw-r--r-- neuralnetworks/1.2/utils/src/PreparedModel.cpp | 5
-rw-r--r-- neuralnetworks/1.3/utils/src/Buffer.cpp | 4
-rw-r--r-- neuralnetworks/1.3/utils/src/Device.cpp | 13
-rw-r--r-- neuralnetworks/1.3/utils/src/PreparedModel.cpp | 9
-rw-r--r-- neuralnetworks/utils/common/include/nnapi/hal/HandleError.h | 77
-rw-r--r-- neuralnetworks/utils/common/src/ProtectCallback.cpp | 2
10 files changed, 66 insertions(+), 84 deletions(-)
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index ab3f5afb23..285c515c20 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -56,7 +56,7 @@ nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
};
const auto ret = device->getCapabilities(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -119,7 +119,8 @@ std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
nn::GeneralResult<void> Device::wait() const {
const auto ret = kDevice->ping();
- return hal::utils::handleTransportError(ret);
+ HANDLE_TRANSPORT_FAILURE(ret);
+ return {};
}
nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
@@ -148,7 +149,7 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
};
const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -168,7 +169,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModel(hidlModel, cb);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 80f885a64e..46dd3f8254 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -67,8 +67,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kPreparedModel->execute(hidlRequest, cb);
- const auto status =
- NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "execute failed with " << toString(status);
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index e45b17ec6e..f73d3f8253 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -57,7 +57,7 @@ nn::GeneralResult<nn::Capabilities> initCapabilities(V1_1::IDevice* device) {
};
const auto ret = device->getCapabilities_1_1(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -120,7 +120,8 @@ std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
nn::GeneralResult<void> Device::wait() const {
const auto ret = kDevice->ping();
- return hal::utils::handleTransportError(ret);
+ HANDLE_TRANSPORT_FAILURE(ret);
+ return {};
}
nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
@@ -150,7 +151,7 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
};
const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -171,7 +172,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != V1_0::ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index 967a252c88..0061065f0b 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -59,7 +59,7 @@ nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device) {
};
const auto ret = device->getCapabilities_1_2(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -81,7 +81,7 @@ nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device) {
};
const auto ret = device->getVersionString(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -101,7 +101,7 @@ nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device) {
};
const auto ret = device->getType(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -121,7 +121,7 @@ nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device) {
};
const auto ret = device->getSupportedExtensions(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -144,7 +144,7 @@ nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
};
const auto ret = device->getNumberOfCacheFilesNeeded(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -217,7 +217,8 @@ std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
nn::GeneralResult<void> Device::wait() const {
const auto ret = kDevice->ping();
- return hal::utils::handleTransportError(ret);
+ HANDLE_TRANSPORT_FAILURE(ret);
+ return {};
}
nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
@@ -247,7 +248,7 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
};
const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -272,7 +273,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
hidlDataCache, hidlToken, cb);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != V1_0::ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status);
@@ -292,7 +293,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != V1_0::ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status);
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index b5a3389e6c..dad9a7e74b 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -83,7 +83,7 @@ PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming
};
const auto ret = kPreparedModel->executeSynchronously(request, measure, cb);
- NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -94,8 +94,7 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
- const auto status =
- NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != V1_0::ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "execute failed with " << toString(status);
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
index a880031ad7..ffdeccdf62 100644
--- a/neuralnetworks/1.3/utils/src/Buffer.cpp
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -64,7 +64,7 @@ nn::GeneralResult<void> Buffer::copyTo(const nn::Memory& dst) const {
const auto hidlDst = NN_TRY(convert(dst));
const auto ret = kBuffer->copyTo(hidlDst);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status);
@@ -79,7 +79,7 @@ nn::GeneralResult<void> Buffer::copyFrom(const nn::Memory& src,
const auto hidlDimensions = hidl_vec<uint32_t>(dimensions);
const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status);
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index 7a7e2514ef..82837bac73 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -86,7 +86,7 @@ nn::GeneralResult<nn::Capabilities> initCapabilities(V1_3::IDevice* device) {
};
const auto ret = device->getCapabilities_1_3(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -162,7 +162,8 @@ std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
nn::GeneralResult<void> Device::wait() const {
const auto ret = kDevice->ping();
- return hal::utils::handleTransportError(ret);
+ HANDLE_TRANSPORT_FAILURE(ret);
+ return {};
}
nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
@@ -191,7 +192,7 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
};
const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -219,7 +220,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const auto ret =
kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
hidlModelCache, hidlDataCache, hidlToken, cb);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
@@ -241,7 +242,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
hidlToken, cb);
- const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
@@ -277,7 +278,7 @@ nn::GeneralResult<nn::SharedBuffer> Device::allocate(
const auto ret =
kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 5d82110829..49b9b0bcc3 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -89,7 +89,7 @@ convertExecuteFencedResults(const hidl_handle& syncFence,
};
const auto ret = callback->getExecutionInfo(cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
};
@@ -133,7 +133,7 @@ PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming
const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
loopTimeoutDuration, cb);
- NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+ HANDLE_TRANSPORT_FAILURE(ret);
return result;
}
@@ -147,8 +147,7 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming
const auto ret =
kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
- const auto status =
- NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+ const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::NONE) {
const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
@@ -230,7 +229,7 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
hidlDeadline, hidlLoopTimeoutDuration,
hidlTimeoutDurationAfterFence, cb);
- NN_TRY(hal::utils::handleTransportError(ret));
+ HANDLE_TRANSPORT_FAILURE(ret);
auto [syncFence, callback] = NN_TRY(std::move(result));
// If executeFenced required the request memory to be moved into shared memory, block here until
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
index e4046b5407..78b2a12918 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -19,65 +19,46 @@
#include <nnapi/Result.h>
#include <nnapi/Types.h>
+#include <type_traits>
+
namespace android::hardware::neuralnetworks::utils {
template <typename Type>
nn::GeneralResult<Type> handleTransportError(const hardware::Return<Type>& ret) {
if (ret.isDeadObject()) {
- return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT)
- << "Return<>::isDeadObject returned true: " << ret.description();
- }
- if (!ret.isOk()) {
- return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "Return<>::isOk returned false: " << ret.description();
- }
- return ret;
-}
-
-template <>
-inline nn::GeneralResult<void> handleTransportError(const hardware::Return<void>& ret) {
- if (ret.isDeadObject()) {
- return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT)
+ return nn::error(nn::ErrorStatus::DEAD_OBJECT)
<< "Return<>::isDeadObject returned true: " << ret.description();
}
if (!ret.isOk()) {
- return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ return nn::error(nn::ErrorStatus::GENERAL_FAILURE)
<< "Return<>::isOk returned false: " << ret.description();
}
- return {};
-}
-
-template <typename Type>
-nn::GeneralResult<Type> makeGeneralFailure(nn::Result<Type> result, nn::ErrorStatus status) {
- if (!result.has_value()) {
- return nn::error(status) << std::move(result).error();
+ if constexpr (!std::is_same_v<Type, void>) {
+ return static_cast<Type>(ret);
+ } else {
+ return {};
}
- return std::move(result).value();
}
-template <>
-inline nn::GeneralResult<void> makeGeneralFailure(nn::Result<void> result, nn::ErrorStatus status) {
- if (!result.has_value()) {
- return nn::error(status) << std::move(result).error();
- }
- return {};
-}
+#define HANDLE_TRANSPORT_FAILURE(ret) \
+ ({ \
+ auto result = ::android::hardware::neuralnetworks::utils::handleTransportError(ret); \
+ if (!result.has_value()) { \
+ return NN_ERROR(result.error().code) << result.error().message; \
+ } \
+ std::move(result).value(); \
+ })
template <typename Type>
-nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::ErrorStatus status) {
+nn::GeneralResult<Type> makeGeneralFailure(nn::Result<Type> result, nn::ErrorStatus status) {
if (!result.has_value()) {
return nn::error(status) << std::move(result).error();
}
- return std::move(result).value();
-}
-
-template <>
-inline nn::ExecutionResult<void> makeExecutionFailure(nn::Result<void> result,
- nn::ErrorStatus status) {
- if (!result.has_value()) {
- return nn::error(status) << std::move(result).error();
+ if constexpr (!std::is_same_v<Type, void>) {
+ return std::move(result).value();
+ } else {
+ return {};
}
- return {};
}
template <typename Type>
@@ -86,16 +67,16 @@ nn::ExecutionResult<Type> makeExecutionFailure(nn::GeneralResult<Type> result) {
const auto [message, status] = std::move(result).error();
return nn::error(status) << message;
}
- return std::move(result).value();
+ if constexpr (!std::is_same_v<Type, void>) {
+ return std::move(result).value();
+ } else {
+ return {};
+ }
}
-template <>
-inline nn::ExecutionResult<void> makeExecutionFailure(nn::GeneralResult<void> result) {
- if (!result.has_value()) {
- const auto [message, status] = std::move(result).error();
- return nn::error(status) << message;
- }
- return {};
+template <typename Type>
+nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::ErrorStatus status) {
+ return makeExecutionFailure(makeGeneralFailure(result, status));
}
} // namespace android::hardware::neuralnetworks::utils
\ No newline at end of file
diff --git a/neuralnetworks/utils/common/src/ProtectCallback.cpp b/neuralnetworks/utils/common/src/ProtectCallback.cpp
index 1d9a3074db..abe4cb675e 100644
--- a/neuralnetworks/utils/common/src/ProtectCallback.cpp
+++ b/neuralnetworks/utils/common/src/ProtectCallback.cpp
@@ -58,7 +58,7 @@ nn::GeneralResult<DeathHandler> DeathHandler::create(sp<hidl::base::V1_0::IBase>
auto deathRecipient = sp<DeathRecipient>::make();
const auto ret = object->linkToDeath(deathRecipient, /*cookie=*/0);
- const bool success = NN_TRY(handleTransportError(ret));
+ const bool success = HANDLE_TRANSPORT_FAILURE(ret);
if (!success) {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IBase::linkToDeath returned false";
}
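As an aside on the HandleError.h hunk above: the patch also folds each explicit <void> specialization (handleTransportError, makeGeneralFailure, makeExecutionFailure) into a single template with if constexpr, which discards the branch that would not compile when Type is void. A standalone sketch of that pattern, using toy result types (hypothetical, not the real nn:: types):

#include <iostream>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>

// Toy result types: Res<T> carries a payload, Res<void> does not, mirroring
// how nn::GeneralResult<void> has no value to forward.
template <typename T>
struct Res {
    std::optional<T> value;
    std::string error;
    bool has_value() const { return value.has_value(); }
};

template <>
struct Res<void> {
    std::string error;  // non-empty means failure
    bool has_value() const { return error.empty(); }
};

// One template covers void and non-void. Without if constexpr, the success
// branch would fail to compile for Res<void> (no `value` member), forcing a
// separate <void> specialization, exactly as in the pre-patch header.
template <typename T>
Res<T> relabel(Res<T> result, const std::string& status) {
    if (!result.has_value()) {
        Res<T> failed{};
        failed.error = status + ": " + result.error;
        return failed;
    }
    if constexpr (!std::is_void_v<T>) {
        return Res<T>{std::move(result.value), {}};
    } else {
        return {};
    }
}

int main() {
    std::cout << relabel(Res<int>{7, {}}, "GENERAL_FAILURE").value.value() << "\n";  // 7
    std::cout << relabel(Res<void>{"dead object"}, "DEAD_OBJECT").error << "\n";     // DEAD_OBJECT: dead object
}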