author     android-build-team Robot <android-build-team-robot@google.com>  2019-10-21 06:14:03 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2019-10-21 06:14:03 +0000
commit     6e4056ca9599bc29a99f6aed2ad1f79f263c28ac (patch)
tree       093daa9a0b65a13cacd118dd7b7ff30d37abcdd7
parent     586aead5271e37734fe7e56f042a14e459711350 (diff)
parent     51146452ac287412715d45e2ab59e826e45b0328 (diff)
Snap for 5954522 from 51146452ac287412715d45e2ab59e826e45b0328 to rvc-release
Change-Id: I448210dbd293262eb2ab0efa7dc2c37171bb5f44
-rw-r--r--  benchmarks/Android.bp           |   1
-rw-r--r--  benchmarks/dlfcn_benchmark.cpp  |  39
-rw-r--r--  docs/native_allocator.md        |  13
-rw-r--r--  linker/linker.cpp               | 172
-rw-r--r--  linker/linker_relocs.h          |  10
5 files changed, 77 insertions, 158 deletions
diff --git a/benchmarks/Android.bp b/benchmarks/Android.bp
index 70f7bab12..a7be96553 100644
--- a/benchmarks/Android.bp
+++ b/benchmarks/Android.bp
@@ -29,6 +29,7 @@ cc_defaults {
         "bionic_benchmarks.cpp",
         "atomic_benchmark.cpp",
         "ctype_benchmark.cpp",
+        "dlfcn_benchmark.cpp",
         "get_heap_size_benchmark.cpp",
         "inttypes_benchmark.cpp",
         "malloc_benchmark.cpp",
diff --git a/benchmarks/dlfcn_benchmark.cpp b/benchmarks/dlfcn_benchmark.cpp
new file mode 100644
index 000000000..6a2bb575d
--- /dev/null
+++ b/benchmarks/dlfcn_benchmark.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/strings.h>
+#include <benchmark/benchmark.h>
+#include <dlfcn.h>
+
+#include "util.h"
+
+void local_function() {}
+
+template<typename F>
+int bm_dladdr(F fun) {
+  const void* addr = reinterpret_cast<void*>(fun);
+  Dl_info info;
+  int res = dladdr(addr, &info);
+  if (res == 0) abort();
+  if (info.dli_fname == nullptr) abort();
+
+  // needed for DoNotOptimize
+  return res;
+}
+BIONIC_TRIVIAL_BENCHMARK(BM_dladdr_libc_printf, bm_dladdr(printf));
+BIONIC_TRIVIAL_BENCHMARK(BM_dladdr_libdl_dladdr, bm_dladdr(dladdr));
+BIONIC_TRIVIAL_BENCHMARK(BM_dladdr_local_function, bm_dladdr(local_function));
+BIONIC_TRIVIAL_BENCHMARK(BM_dladdr_libbase_split, bm_dladdr(android::base::Split));
diff --git a/docs/native_allocator.md b/docs/native_allocator.md
index 82a98fe48..76954700f 100644
--- a/docs/native_allocator.md
+++ b/docs/native_allocator.md
@@ -263,21 +263,22 @@ so it is not possible to create a completely accurate replay.
 To generate these traces, see the
 [Malloc Debug documentation](https://android.googlesource.com/platform/bionic/+/master/libc/malloc_debug/README.md),
 the option
 [record\_allocs](https://android.googlesource.com/platform/bionic/+/master/libc/malloc_debug/README.md#record_allocs_total_entries).

-To run these benchmarks, first copy the trace files to the target and
-unzip them using these commands:
+To run these benchmarks, first copy the trace files to the target using
+these commands:

     adb shell push system/extras/traces /data/local/tmp
-    adb shell 'cd /data/local/tmp/traces && for name in *.zip; do unzip $name; done'

 Since all of the traces come from applications, the `memory_replay`
 program will always call `mallopt(M_DECAY_TIME, 1)' before running
 the trace.

 Run the benchmark thusly:

-    adb shell memory_replay64 /data/local/tmp/traces/XXX.txt
-    adb shell memory_replay32 /data/local/tmp/traces/XXX.txt
+    adb shell memory_replay64 /data/local/tmp/traces/XXX.zip
+    adb shell memory_replay32 /data/local/tmp/traces/XXX.zip

-Where XXX.txt is the name of a trace file.
+Where XXX.zip is the name of a zipped trace file. The `memory_replay`
+program also can process text files, but all trace files are currently
+checked in as zip files.

 Every 100000 allocation operations, a dump of the RSS and VA space
 will be performed. At the end, a final RSS and VA space number will be
 printed.
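The new benchmark above times `dladdr()` lookups against symbols from several objects (libc's `printf`, libdl's `dladdr`, a function local to the benchmark binary, and libbase's `android::base::Split`). For readers unfamiliar with the call, the standalone sketch below shows roughly what each iteration does; the file name, error handling, and build command are illustrative assumptions, not part of the commit.

    // Minimal sketch of the dladdr() lookup the benchmark measures.
    // Build with e.g. "clang++ dladdr_demo.cpp -ldl" (hypothetical file name).
    #include <dlfcn.h>
    #include <stdio.h>

    int main() {
      // Ask the dynamic linker which loaded object and nearest symbol
      // contain printf's address.
      Dl_info info;
      if (dladdr(reinterpret_cast<void*>(&printf), &info) == 0) {
        fprintf(stderr, "dladdr failed\n");
        return 1;
      }
      // dli_fname is the path of the containing object; dli_sname is the
      // nearest symbol name and may be null if no symbol covers the address.
      printf("object: %s\n", info.dli_fname);
      printf("symbol: %s\n", info.dli_sname != nullptr ? info.dli_sname : "(unknown)");
      return 0;
    }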
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 37a318916..dec575baf 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -2931,8 +2931,11 @@ static ElfW(Addr) get_addend(ElfW(Rela)* rela, ElfW(Addr) reloc_addr __unused) {
 }
 #else
 static ElfW(Addr) get_addend(ElfW(Rel)* rel, ElfW(Addr) reloc_addr) {
+  // The i386 psABI specifies that R_386_GLOB_DAT doesn't have an addend. The ARM ELF ABI document
+  // (IHI0044F) specifies that R_ARM_GLOB_DAT has an addend, but Bionic isn't adding it.
   if (ELFW(R_TYPE)(rel->r_info) == R_GENERIC_RELATIVE ||
       ELFW(R_TYPE)(rel->r_info) == R_GENERIC_IRELATIVE ||
+      ELFW(R_TYPE)(rel->r_info) == R_GENERIC_ABSOLUTE ||
       ELFW(R_TYPE)(rel->r_info) == R_GENERIC_TLS_DTPREL ||
       ELFW(R_TYPE)(rel->r_info) == R_GENERIC_TLS_TPREL) {
     return *reinterpret_cast<ElfW(Addr)*>(reloc_addr);
@@ -3056,6 +3059,7 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r

     switch (type) {
       case R_GENERIC_JUMP_SLOT:
+      case R_GENERIC_ABSOLUTE:
       case R_GENERIC_GLOB_DAT:
       case R_GENERIC_RELATIVE:
       case R_GENERIC_IRELATIVE:
@@ -3063,17 +3067,8 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
       case R_GENERIC_TLS_DTPREL:
       case R_GENERIC_TLS_TPREL:
       case R_GENERIC_TLSDESC:
-#if defined(__aarch64__)
-      case R_AARCH64_ABS64:
-      case R_AARCH64_ABS32:
-      case R_AARCH64_ABS16:
-#elif defined(__x86_64__)
+#if defined(__x86_64__)
       case R_X86_64_32:
-      case R_X86_64_64:
-#elif defined(__arm__)
-      case R_ARM_ABS32:
-#elif defined(__i386__)
-      case R_386_32:
 #endif
         /*
          * The sym_addr was initialized to be zero above, or the relocation
@@ -3153,10 +3148,11 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
         *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + addend);
         break;

+      case R_GENERIC_ABSOLUTE:
       case R_GENERIC_GLOB_DAT:
         count_relocation(kRelocAbsolute);
         MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO GLOB_DAT %16p <- %16p %s\n",
+        TRACE_TYPE(RELO, "RELO ABSOLUTE/GLOB_DAT %16p <- %16p %s\n",
                    reinterpret_cast<void*>(reloc),
                    reinterpret_cast<void*>(sym_addr + addend), sym_name);
         *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + addend);
@@ -3202,6 +3198,17 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
           *reinterpret_cast<ElfW(Addr)*>(reloc) = ifunc_addr;
         }
         break;
+      case R_GENERIC_COPY:
+        // Copy relocations allow read-only data or code in a non-PIE executable to access a
+        // variable from a DSO. The executable reserves extra space in its .bss section, and the
+        // linker copies the variable into the extra space. The executable then exports its copy
+        // to interpose the copy in the DSO.
+        //
+        // Bionic only supports PIE executables, so copy relocations aren't supported. The ARM and
+        // AArch64 ABI documents only allow them for ET_EXEC (non-PIE) objects. See IHI0056B and
+        // IHI0044F.
+        DL_ERR("%s COPY relocations are not supported", get_realpath());
+        return false;
       case R_GENERIC_TLS_TPREL:
         count_relocation(kRelocRelative);
         MARK(rel->r_offset);
@@ -3296,121 +3303,14 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
         break;
 #endif // defined(__aarch64__)

-#if defined(__aarch64__)
-      case R_AARCH64_ABS64:
-        count_relocation(kRelocAbsolute);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
-                   reloc, sym_addr + addend, sym_name);
-        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend;
-        break;
-      case R_AARCH64_ABS32:
-        count_relocation(kRelocAbsolute);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
-                   reloc, sym_addr + addend, sym_name);
-        {
-          const ElfW(Addr) min_value = static_cast<ElfW(Addr)>(INT32_MIN);
-          const ElfW(Addr) max_value = static_cast<ElfW(Addr)>(UINT32_MAX);
-          if ((min_value <= (sym_addr + addend)) &&
-              ((sym_addr + addend) <= max_value)) {
-            *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend;
-          } else {
-            DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
-                   sym_addr + addend, min_value, max_value);
-            return false;
-          }
-        }
-        break;
-      case R_AARCH64_ABS16:
-        count_relocation(kRelocAbsolute);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
-                   reloc, sym_addr + addend, sym_name);
-        {
-          const ElfW(Addr) min_value = static_cast<ElfW(Addr)>(INT16_MIN);
-          const ElfW(Addr) max_value = static_cast<ElfW(Addr)>(UINT16_MAX);
-          if ((min_value <= (sym_addr + addend)) &&
-              ((sym_addr + addend) <= max_value)) {
-            *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + addend);
-          } else {
-            DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
-                   sym_addr + addend, min_value, max_value);
-            return false;
-          }
-        }
-        break;
-      case R_AARCH64_PREL64:
-        count_relocation(kRelocRelative);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
-                   reloc, sym_addr + addend, rel->r_offset, sym_name);
-        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend - rel->r_offset;
-        break;
-      case R_AARCH64_PREL32:
-        count_relocation(kRelocRelative);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
-                   reloc, sym_addr + addend, rel->r_offset, sym_name);
-        {
-          const ElfW(Addr) min_value = static_cast<ElfW(Addr)>(INT32_MIN);
-          const ElfW(Addr) max_value = static_cast<ElfW(Addr)>(UINT32_MAX);
-          if ((min_value <= (sym_addr + addend - rel->r_offset)) &&
-              ((sym_addr + addend - rel->r_offset) <= max_value)) {
-            *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend - rel->r_offset;
-          } else {
-            DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
-                   sym_addr + addend - rel->r_offset, min_value, max_value);
-            return false;
-          }
-        }
-        break;
-      case R_AARCH64_PREL16:
-        count_relocation(kRelocRelative);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
-                   reloc, sym_addr + addend, rel->r_offset, sym_name);
-        {
-          const ElfW(Addr) min_value = static_cast<ElfW(Addr)>(INT16_MIN);
-          const ElfW(Addr) max_value = static_cast<ElfW(Addr)>(UINT16_MAX);
-          if ((min_value <= (sym_addr + addend - rel->r_offset)) &&
-              ((sym_addr + addend - rel->r_offset) <= max_value)) {
-            *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend - rel->r_offset;
-          } else {
-            DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
-                   sym_addr + addend - rel->r_offset, min_value, max_value);
-            return false;
-          }
-        }
-        break;
-
-      case R_AARCH64_COPY:
-        /*
-         * ET_EXEC is not supported so this should not happen.
-         *
-         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
-         *
-         * Section 4.6.11 "Dynamic relocations"
-         * R_AARCH64_COPY may only appear in executable objects where e_type is
-         * set to ET_EXEC.
-         */
-        DL_ERR("%s R_AARCH64_COPY relocations are not supported", get_realpath());
-        return false;
-#elif defined(__x86_64__)
+#if defined(__x86_64__)
       case R_X86_64_32:
-        count_relocation(kRelocRelative);
+        count_relocation(kRelocAbsolute);
         MARK(rel->r_offset);
         TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                    static_cast<size_t>(sym_addr), sym_name);
         *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr + addend;
         break;
-      case R_X86_64_64:
-        count_relocation(kRelocRelative);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
-                   static_cast<size_t>(sym_addr), sym_name);
-        *reinterpret_cast<Elf64_Addr*>(reloc) = sym_addr + addend;
-        break;
       case R_X86_64_PC32:
         count_relocation(kRelocRelative);
         MARK(rel->r_offset);
@@ -3419,39 +3319,7 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r
                    static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
         *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr + addend - reloc;
         break;
-#elif defined(__arm__)
-      case R_ARM_ABS32:
-        count_relocation(kRelocAbsolute);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
-        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
-        break;
-      case R_ARM_REL32:
-        count_relocation(kRelocRelative);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
-                   reloc, sym_addr, rel->r_offset, sym_name);
-        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
-        break;
-      case R_ARM_COPY:
-        /*
-         * ET_EXEC is not supported so this should not happen.
-         *
-         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
-         *
-         * Section 4.6.1.10 "Dynamic relocations"
-         * R_ARM_COPY may only appear in executable objects where e_type is
-         * set to ET_EXEC.
-         */
-        DL_ERR("%s R_ARM_COPY relocations are not supported", get_realpath());
-        return false;
 #elif defined(__i386__)
-      case R_386_32:
-        count_relocation(kRelocRelative);
-        MARK(rel->r_offset);
-        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
-        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
-        break;
       case R_386_PC32:
         count_relocation(kRelocRelative);
         MARK(rel->r_offset);
diff --git a/linker/linker_relocs.h b/linker/linker_relocs.h
index 1da5ebe34..93d899e0f 100644
--- a/linker/linker_relocs.h
+++ b/linker/linker_relocs.h
@@ -35,9 +35,12 @@
 #if defined (__aarch64__)

 #define R_GENERIC_JUMP_SLOT R_AARCH64_JUMP_SLOT
+// R_AARCH64_ABS64 is classified as a static relocation but it is common in DSOs.
+#define R_GENERIC_ABSOLUTE R_AARCH64_ABS64
 #define R_GENERIC_GLOB_DAT R_AARCH64_GLOB_DAT
 #define R_GENERIC_RELATIVE R_AARCH64_RELATIVE
 #define R_GENERIC_IRELATIVE R_AARCH64_IRELATIVE
+#define R_GENERIC_COPY R_AARCH64_COPY
 #define R_GENERIC_TLS_DTPMOD R_AARCH64_TLS_DTPMOD
 #define R_GENERIC_TLS_DTPREL R_AARCH64_TLS_DTPREL
 #define R_GENERIC_TLS_TPREL R_AARCH64_TLS_TPREL
@@ -46,9 +49,12 @@
 #elif defined (__arm__)

 #define R_GENERIC_JUMP_SLOT R_ARM_JUMP_SLOT
+// R_ARM_ABS32 is classified as a static relocation but it is common in DSOs.
+#define R_GENERIC_ABSOLUTE R_ARM_ABS32
 #define R_GENERIC_GLOB_DAT R_ARM_GLOB_DAT
 #define R_GENERIC_RELATIVE R_ARM_RELATIVE
 #define R_GENERIC_IRELATIVE R_ARM_IRELATIVE
+#define R_GENERIC_COPY R_ARM_COPY
 #define R_GENERIC_TLS_DTPMOD R_ARM_TLS_DTPMOD32
 #define R_GENERIC_TLS_DTPREL R_ARM_TLS_DTPOFF32
 #define R_GENERIC_TLS_TPREL R_ARM_TLS_TPOFF32
@@ -57,9 +63,11 @@
 #elif defined (__i386__)

 #define R_GENERIC_JUMP_SLOT R_386_JMP_SLOT
+#define R_GENERIC_ABSOLUTE R_386_32
 #define R_GENERIC_GLOB_DAT R_386_GLOB_DAT
 #define R_GENERIC_RELATIVE R_386_RELATIVE
 #define R_GENERIC_IRELATIVE R_386_IRELATIVE
+#define R_GENERIC_COPY R_386_COPY
 #define R_GENERIC_TLS_DTPMOD R_386_TLS_DTPMOD32
 #define R_GENERIC_TLS_DTPREL R_386_TLS_DTPOFF32
 #define R_GENERIC_TLS_TPREL R_386_TLS_TPOFF
@@ -68,9 +76,11 @@
 #elif defined (__x86_64__)

 #define R_GENERIC_JUMP_SLOT R_X86_64_JUMP_SLOT
+#define R_GENERIC_ABSOLUTE R_X86_64_64
 #define R_GENERIC_GLOB_DAT R_X86_64_GLOB_DAT
 #define R_GENERIC_RELATIVE R_X86_64_RELATIVE
 #define R_GENERIC_IRELATIVE R_X86_64_IRELATIVE
+#define R_GENERIC_COPY R_X86_64_COPY
 #define R_GENERIC_TLS_DTPMOD R_X86_64_DTPMOD64
 #define R_GENERIC_TLS_DTPREL R_X86_64_DTPOFF64
 #define R_GENERIC_TLS_TPREL R_X86_64_TPOFF64
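For context on the new R_GENERIC_COPY error path above: a copy relocation is what the static linker emits when a non-PIE executable references a data symbol defined in a shared library directly rather than through the GOT. The sketch below is illustrative only; the file names and build commands are assumptions, not part of the commit, and since Bionic only loads PIE executables this pattern does not occur on Android, which is why the linker now reports it as an explicit error.

    // Illustration only: how a copy relocation arises on a non-Android toolchain.
    //
    // Suppose a shared library (a hypothetical libfoo.so) defines:
    //     int foo_counter = 42;   // built with: clang++ -shared -fPIC foo.cpp -o libfoo.so
    //
    // and a non-PIE executable references the variable directly:
    #include <cstdio>

    extern int foo_counter;  // defined in libfoo.so

    int main() {
      // Linking with "clang++ -no-pie main.cpp ./libfoo.so" makes the static
      // linker reserve space for foo_counter in the executable's own .bss and
      // emit an R_*_COPY dynamic relocation (visible with "readelf -rW a.out",
      // e.g. R_X86_64_COPY). At load time the dynamic linker copies the DSO's
      // initial value into that space, and the DSO's own references are
      // redirected to the executable's copy.
      printf("%d\n", foo_counter);
      return 0;
    }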