-rw-r--r--    neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp    9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 40f6cd1573..dcf8451919 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -659,6 +659,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
ASSERT_NE(nullptr, burst.get());
// associate a unique slot with each memory pool
+ constexpr int64_t kIgnoreSlot = -1;
int64_t currentSlot = 0;
std::vector<int64_t> slots;
slots.reserve(request.pools.size());
@@ -667,7 +668,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
slots.push_back(currentSlot++);
} else {
EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
- slots.push_back(-1);
+ slots.push_back(kIgnoreSlot);
}
}
@@ -698,8 +699,10 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
// Mark each slot as unused after the execution. This is unnecessary because the
// burst is freed after this scope ends, but this is here to test the functionality.
for (int64_t slot : slots) {
- ret = burst->releaseMemoryResource(slot);
- ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ if (slot != kIgnoreSlot) {
+ ret = burst->releaseMemoryResource(slot);
+ ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ }
}
break;
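
The change above introduces a named sentinel, kIgnoreSlot, for request memory pools that are carried as driver tokens rather than shared memory: token pools never get a burst slot assigned, so the cleanup loop must not call releaseMemoryResource for them. The standalone sketch below mirrors that pattern under simplified assumptions; Pool, isSharedMemory, assignSlots, releaseSlots, and the printed release are hypothetical stand-ins for RequestMemoryPool, its tag check, and burst->releaseMemoryResource() from the diff, not the actual NNAPI AIDL types.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for RequestMemoryPool: a pool either owns shared
// memory (and gets a burst slot) or is a driver token (and does not).
struct Pool {
    bool isSharedMemory;
};

// Sentinel marking pools that were never assigned a slot (token pools),
// matching the kIgnoreSlot constant added in the diff.
constexpr int64_t kIgnoreSlot = -1;

// Assign a unique, monotonically increasing slot to every shared-memory pool
// and kIgnoreSlot to every token pool, mirroring the first hunk of the diff.
std::vector<int64_t> assignSlots(const std::vector<Pool>& pools) {
    std::vector<int64_t> slots;
    slots.reserve(pools.size());
    int64_t currentSlot = 0;
    for (const Pool& pool : pools) {
        slots.push_back(pool.isSharedMemory ? currentSlot++ : kIgnoreSlot);
    }
    return slots;
}

// Release only the slots that were actually registered, skipping the sentinel,
// mirroring the guarded release loop in the last hunk. The print stands in
// for burst->releaseMemoryResource(slot).
void releaseSlots(const std::vector<int64_t>& slots) {
    for (int64_t slot : slots) {
        if (slot != kIgnoreSlot) {
            std::cout << "releasing slot " << slot << '\n';
        }
    }
}

int main() {
    const std::vector<Pool> pools = {{true}, {false}, {true}};
    releaseSlots(assignSlots(pools));  // releases slots 0 and 1; the token pool is skipped
    return 0;
}

Using a named constant instead of a bare -1 keeps the assignment and release loops in agreement: both sites refer to the same sentinel, so a reader can see at a glance why token pools are excluded from the release pass.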