summary refs log tree commit diff
path: root/benchmarks/stdlib_benchmark.cpp
diff options
context:
space:
mode:
author	Elliott Hughes <enh@google.com>	2020-10-22 13:43:59 -0700
committer	Elliott Hughes <enh@google.com>	2020-10-22 13:43:59 -0700
commite8693e78711e8f45ccd2b610e4dbe0b94d551cc9 (patch)
tree5b7a67244a6a122e27fca20f84d0a4f78d487fe2 /benchmarks/stdlib_benchmark.cpp
parent9aa6b15d799ac246e842552fca555920a93ce46b (diff)
Make more use of benchmark::DoNotOptimize in benchmarks.
A lot of these benchmarks predate DoNotOptimize and rolled their own hacks.

Bug: http://b/148307629
Test: ran benchmarks before & after and got similar results
Change-Id: If44699d261b687f6253af709edda58f4c90fb285
Diffstat (limited to 'benchmarks/stdlib_benchmark.cpp')
-rw-r--r--	benchmarks/stdlib_benchmark.cpp	3
1 file changed, 1 insertion, 2 deletions
diff --git a/benchmarks/stdlib_benchmark.cpp b/benchmarks/stdlib_benchmark.cpp
index 45b953f61..b6ea58db3 100644
--- a/benchmarks/stdlib_benchmark.cpp
+++ b/benchmarks/stdlib_benchmark.cpp
@@ -189,9 +189,8 @@ static void BM_stdlib_mbstowcs(benchmark::State& state) {
buf[l++] = i, buf[l++] = j, buf[l++] = 0x80, buf[l++] = k;
buf[l++] = 0;
- volatile size_t c __attribute__((unused)) = 0;
for (auto _ : state) {
- c = mbstowcs(widebuf_aligned, buf_aligned, 500000);
+ benchmark::DoNotOptimize(mbstowcs(widebuf_aligned, buf_aligned, 500000));
}
state.SetBytesProcessed(uint64_t(state.iterations()) * uint64_t(500000));