author    | Lev Proleev <levp@google.com>    | 2021-01-26 19:40:20 +0000
committer | Xusong Wang <xusongw@google.com> | 2021-03-16 11:57:04 -0700
commit    | 900c28a250297964cf9aa65653b9adce4e6db27a (patch)
tree      | a2fe246f0c84e9a676a26edc892eeaefa8f06b2c /neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
parent    | 7b0b54697cf9d9f9afbf4d4c97bd5292057c732e (diff)
Add canonical types adapters for NNAPI AIDL interface
Also:
* Add missing AIDL<->CT conversions
* Add AIDL-specific info to neuralnetworks/utils/README.md
* Add mock classes and tests for AIDL adapters
Bug: 179015258
Test: neuralnetworks_utils_hal_test
Change-Id: Ifa98fadd46dca5dbf9b3ceb4da811aa8da45b6e4
Merged-In: Ifa98fadd46dca5dbf9b3ceb4da811aa8da45b6e4
(cherry picked from commit 3b93b0b56a4f5128eaa942d804dd490317c0abcb)
Diffstat (limited to 'neuralnetworks/aidl/utils/test/PreparedModelTest.cpp')
-rw-r--r-- | neuralnetworks/aidl/utils/test/PreparedModelTest.cpp | 272 |
1 file changed, 272 insertions, 0 deletions
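All of the new tests follow the same pattern: build a gMock `MockPreparedModel`, wrap it in the AIDL-to-canonical `PreparedModel` adapter, program the mock to hand back either a result or a binder/driver failure, and check that the adapter maps it to the matching canonical `nn::ErrorStatus`. The sketch below condenses that pattern; it reuses the names from the file in this diff and is illustrative only (the extra NDK binder includes are assumptions here, since the real test picks them up transitively).

```cpp
// Illustrative sketch of the test pattern used throughout PreparedModelTest.cpp.
// MockPreparedModel and PreparedModel come from this change; the binder headers
// below are assumed (the real test receives them transitively).
#include "MockPreparedModel.h"

#include <android/binder_auto_utils.h>
#include <android/binder_status.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nnapi/Types.h>
#include <nnapi/hal/aidl/PreparedModel.h>

namespace aidl::android::hardware::neuralnetworks::utils {

TEST(PreparedModelTestSketch, deadBinderMapsToDeadObject) {
    // gMock implementation of the AIDL IPreparedModel interface.
    const auto mockPreparedModel = MockPreparedModel::create();
    // Adapter under test: exposes the AIDL object through nn::IPreparedModel.
    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();

    // Make the AIDL call fail with a dead binder.
    using ::testing::_;
    EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
            .Times(1)
            .WillOnce(::testing::InvokeWithoutArgs(
                    [] { return ndk::ScopedAStatus::fromStatus(STATUS_DEAD_OBJECT); }));

    // The adapter is expected to surface the canonical DEAD_OBJECT error.
    const auto result = preparedModel->execute({}, {}, {}, {});
    ASSERT_FALSE(result.has_value());
    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}

}  // namespace aidl::android::hardware::neuralnetworks::utils
```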
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
new file mode 100644
index 0000000000..7e28861054
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MockFencedExecutionCallback.h"
+#include "MockPreparedModel.h"
+
+#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/aidl/PreparedModel.h>
+
+#include <functional>
+#include <memory>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+namespace {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::SetArgPointee;
+
+const std::shared_ptr<IPreparedModel> kInvalidPreparedModel;
+constexpr auto kNoTiming = Timing{.timeOnDevice = -1, .timeInDriver = -1};
+
+constexpr auto makeStatusOk = [] { return ndk::ScopedAStatus::ok(); };
+
+constexpr auto makeGeneralFailure = [] {
+    return ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+};
+constexpr auto makeGeneralTransportFailure = [] {
+    return ndk::ScopedAStatus::fromStatus(STATUS_NO_MEMORY);
+};
+constexpr auto makeDeadObjectFailure = [] {
+    return ndk::ScopedAStatus::fromStatus(STATUS_DEAD_OBJECT);
+};
+
+auto makeFencedExecutionResult(const std::shared_ptr<MockFencedExecutionCallback>& callback) {
+    return [callback](const Request& /*request*/,
+                      const std::vector<ndk::ScopedFileDescriptor>& /*waitFor*/,
+                      bool /*measureTiming*/, int64_t /*deadline*/, int64_t /*loopTimeoutDuration*/,
+                      int64_t /*duration*/, FencedExecutionResult* fencedExecutionResult) {
+        *fencedExecutionResult = FencedExecutionResult{.callback = callback,
+                                                       .syncFence = ndk::ScopedFileDescriptor(-1)};
+        return ndk::ScopedAStatus::ok();
+    };
+}
+
+}  // namespace
+
+TEST(PreparedModelTest, invalidPreparedModel) {
+    // run test
+    const auto result = PreparedModel::create(kInvalidPreparedModel);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, executeSync) {
+    // setup call
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    const auto mockExecutionResult = ExecutionResult{
+            .outputSufficientSize = true,
+            .outputShapes = {},
+            .timing = kNoTiming,
+    };
+    EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+            .Times(1)
+            .WillOnce(
+                    DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {});
+
+    // verify result
+    EXPECT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(PreparedModelTest, executeSyncError) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makeGeneralFailure));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, executeSyncTransportFailure) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, executeSyncDeadObject) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, executeFenced) {
+    // setup call
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    const auto mockCallback = MockFencedExecutionCallback::create();
+    EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+                            SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
+
+    // run test
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    const auto& [syncFence, callback] = result.value();
+    EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+    ASSERT_NE(callback, nullptr);
+
+    // get results from callback
+    const auto callbackResult = callback();
+    ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code << ": "
+                                            << callbackResult.error().message;
+}
+
+TEST(PreparedModelTest, executeFencedCallbackError) {
+    // setup call
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    const auto mockCallback = MockFencedExecutionCallback::create();
+    EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+            .Times(1)
+            .WillOnce(Invoke(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+                                   SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
+                                   Invoke(makeStatusOk))));
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
+
+    // run test
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    const auto& [syncFence, callback] = result.value();
+    EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+    ASSERT_NE(callback, nullptr);
+
+    // verify callback failure
+    const auto callbackResult = callback();
+    ASSERT_FALSE(callbackResult.has_value());
+    EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, executeFencedError) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+
+    // run test
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, executeFencedTransportFailure) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, executeFencedDeadObject) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+// TODO: test burst execution if/when it is added to nn::IPreparedModel.
+
+TEST(PreparedModelTest, getUnderlyingResource) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+    // run test
+    const auto resource = preparedModel->getUnderlyingResource();
+
+    // verify resource
+    const std::shared_ptr<IPreparedModel>* maybeMock =
+            std::any_cast<std::shared_ptr<IPreparedModel>>(&resource);
+    ASSERT_NE(maybeMock, nullptr);
+    EXPECT_EQ(maybeMock->get(), mockPreparedModel.get());
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils