Diffstat (limited to 'libs/androidfw/LoadedArsc.cpp')
-rw-r--r--  libs/androidfw/LoadedArsc.cpp  493
1 file changed, 289 insertions(+), 204 deletions(-)
diff --git a/libs/androidfw/LoadedArsc.cpp b/libs/androidfw/LoadedArsc.cpp
index bd7b80469ddc..28548e27baf0 100644
--- a/libs/androidfw/LoadedArsc.cpp
+++ b/libs/androidfw/LoadedArsc.cpp
@@ -18,6 +18,7 @@
#include "androidfw/LoadedArsc.h"
+#include <algorithm>
#include <cstddef>
#include <limits>
@@ -37,7 +38,7 @@
#include "androidfw/ResourceUtils.h"
#include "androidfw/Util.h"
-using android::base::StringPrintf;
+using ::android::base::StringPrintf;
namespace android {
@@ -61,6 +62,10 @@ struct TypeSpec {
// and under which configurations it varies.
const ResTable_typeSpec* type_spec;
+ // Pointer to the mmapped data where the IDMAP mappings for this type
+ // exist. May be nullptr if no IDMAP exists.
+ const IdmapEntry_header* idmap_entries;
+
// The number of types that follow this struct.
// There is a type for each configuration
// that entries are defined for.
@@ -84,7 +89,10 @@ namespace {
// the Type structs.
class TypeSpecPtrBuilder {
public:
- TypeSpecPtrBuilder(const ResTable_typeSpec* header) : header_(header) {}
+ explicit TypeSpecPtrBuilder(const ResTable_typeSpec* header,
+ const IdmapEntry_header* idmap_header)
+ : header_(header), idmap_header_(idmap_header) {
+ }
void AddType(const ResTable_type* type) {
ResTable_config config;
@@ -99,6 +107,7 @@ class TypeSpecPtrBuilder {
}
TypeSpec* type_spec = (TypeSpec*)::malloc(sizeof(TypeSpec) + (types_.size() * sizeof(Type)));
type_spec->type_spec = header_;
+ type_spec->idmap_entries = idmap_header_;
type_spec->type_count = types_.size();
memcpy(type_spec + 1, types_.data(), types_.size() * sizeof(Type));
return TypeSpecPtr(type_spec);
@@ -108,220 +117,232 @@ class TypeSpecPtrBuilder {
DISALLOW_COPY_AND_ASSIGN(TypeSpecPtrBuilder);
const ResTable_typeSpec* header_;
+ const IdmapEntry_header* idmap_header_;
std::vector<Type> types_;
};
} // namespace
-bool LoadedPackage::FindEntry(uint8_t type_idx, uint16_t entry_idx, const ResTable_config& config,
- LoadedArscEntry* out_entry, ResTable_config* out_selected_config,
- uint32_t* out_flags) const {
- ATRACE_CALL();
+LoadedPackage::LoadedPackage() = default;
+LoadedPackage::~LoadedPackage() = default;
- // If the type IDs are offset in this package, we need to take that into account when searching
- // for a type.
- const TypeSpecPtr& ptr = type_specs_[type_idx - type_id_offset_];
- if (ptr == nullptr) {
+// Precondition: The header passed in has already been verified, so reading any fields and trusting
+// the ResChunk_header is safe.
+static bool VerifyResTableType(const ResTable_type* header) {
+ if (header->id == 0) {
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE has invalid ID 0.";
return false;
}
- // Don't bother checking if the entry ID is larger than
- // the number of entries.
- if (entry_idx >= dtohl(ptr->type_spec->entryCount)) {
+ const size_t entry_count = dtohl(header->entryCount);
+ if (entry_count > std::numeric_limits<uint16_t>::max()) {
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE has too many entries (" << entry_count << ").";
return false;
}
- const ResTable_config* best_config = nullptr;
- const ResTable_type* best_type = nullptr;
- uint32_t best_offset = 0;
-
- for (uint32_t i = 0; i < ptr->type_count; i++) {
- const Type* type = &ptr->types[i];
-
- if (type->configuration.match(config) &&
- (best_config == nullptr || type->configuration.isBetterThan(*best_config, &config))) {
- // The configuration matches and is better than the previous selection.
- // Find the entry value if it exists for this configuration.
- size_t entry_count = dtohl(type->type->entryCount);
- if (entry_idx < entry_count) {
- const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
- reinterpret_cast<const uint8_t*>(type->type) + dtohs(type->type->header.headerSize));
- const uint32_t offset = dtohl(entry_offsets[entry_idx]);
- if (offset != ResTable_type::NO_ENTRY) {
- // There is an entry for this resource, record it.
- best_config = &type->configuration;
- best_type = type->type;
- best_offset = offset + dtohl(type->type->entriesStart);
- }
- }
- }
- }
+ // Make sure that there is enough room for the entry offsets.
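+  // Expected layout: a header of headerSize bytes, then entry_count 4-byte entry offsets,
+  // then the entry data starting at entriesStart.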
+ const size_t offsets_offset = dtohs(header->header.headerSize);
+ const size_t entries_offset = dtohl(header->entriesStart);
+ const size_t offsets_length = sizeof(uint32_t) * entry_count;
- if (best_type == nullptr) {
+ if (offsets_offset > entries_offset || entries_offset - offsets_offset < offsets_length) {
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE entry offsets overlap actual entry data.";
return false;
}
- const uint32_t* flags = reinterpret_cast<const uint32_t*>(ptr->type_spec + 1);
- *out_flags = dtohl(flags[entry_idx]);
- *out_selected_config = *best_config;
-
- const ResTable_entry* best_entry = reinterpret_cast<const ResTable_entry*>(
- reinterpret_cast<const uint8_t*>(best_type) + best_offset);
- out_entry->entry = best_entry;
- out_entry->type_string_ref = StringPoolRef(&type_string_pool_, best_type->id - 1);
- out_entry->entry_string_ref = StringPoolRef(&key_string_pool_, dtohl(best_entry->key.index));
- return true;
-}
-
-// The destructor gets generated into arbitrary translation units
-// if left implicit, which causes the compiler to complain about
-// forward declarations and incomplete types.
-LoadedArsc::~LoadedArsc() {}
-
-bool LoadedArsc::FindEntry(uint32_t resid, const ResTable_config& config,
- LoadedArscEntry* out_entry, ResTable_config* out_selected_config,
- uint32_t* out_flags) const {
- ATRACE_CALL();
- const uint8_t package_id = get_package_id(resid);
- const uint8_t type_id = get_type_id(resid);
- const uint16_t entry_id = get_entry_id(resid);
-
- if (type_id == 0) {
- LOG(ERROR) << "Invalid ID 0x" << std::hex << resid << std::dec << ".";
+ if (entries_offset > dtohl(header->header.size)) {
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE entry offsets extend beyond chunk.";
return false;
}
- for (const auto& loaded_package : packages_) {
- if (loaded_package->package_id_ == package_id) {
- return loaded_package->FindEntry(type_id - 1, entry_id, config, out_entry,
- out_selected_config, out_flags);
- }
+ if (entries_offset & 0x03) {
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE entries start at unaligned address.";
+ return false;
}
- return false;
+ return true;
}
-const LoadedPackage* LoadedArsc::GetPackageForId(uint32_t resid) const {
- const uint8_t package_id = get_package_id(resid);
- for (const auto& loaded_package : packages_) {
- if (loaded_package->package_id_ == package_id) {
- return loaded_package.get();
- }
+static bool VerifyResTableEntry(const ResTable_type* type, uint32_t entry_offset,
+ size_t entry_idx) {
+ // Check that the offset is aligned.
+ if (entry_offset & 0x03) {
+ LOG(ERROR) << "Entry offset at index " << entry_idx << " is not 4-byte aligned.";
+ return false;
}
- return nullptr;
-}
-
-static bool VerifyType(const Chunk& chunk) {
- ATRACE_CALL();
- const ResTable_type* header = chunk.header<ResTable_type, kResTableTypeMinSize>();
- const size_t entry_count = dtohl(header->entryCount);
- if (entry_count > std::numeric_limits<uint16_t>::max()) {
- LOG(ERROR) << "Too many entries in RES_TABLE_TYPE_TYPE.";
+ // Check that the offset doesn't overflow.
+ if (entry_offset > std::numeric_limits<uint32_t>::max() - dtohl(type->entriesStart)) {
+ // Overflow in offset.
+ LOG(ERROR) << "Entry offset at index " << entry_idx << " is too large.";
return false;
}
- // Make sure that there is enough room for the entry offsets.
- const size_t offsets_offset = chunk.header_size();
- const size_t entries_offset = dtohl(header->entriesStart);
- const size_t offsets_length = sizeof(uint32_t) * entry_count;
+ const size_t chunk_size = dtohl(type->header.size);
- if (offsets_offset + offsets_length > entries_offset) {
- LOG(ERROR) << "Entry offsets overlap actual entry data.";
+ entry_offset += dtohl(type->entriesStart);
+ if (entry_offset > chunk_size - sizeof(ResTable_entry)) {
+ LOG(ERROR) << "Entry offset at index " << entry_idx
+ << " is too large. No room for ResTable_entry.";
return false;
}
- if (entries_offset > chunk.size()) {
- LOG(ERROR) << "Entry offsets extend beyond chunk.";
+ const ResTable_entry* entry = reinterpret_cast<const ResTable_entry*>(
+ reinterpret_cast<const uint8_t*>(type) + entry_offset);
+
+ const size_t entry_size = dtohs(entry->size);
+ if (entry_size < sizeof(*entry)) {
+ LOG(ERROR) << "ResTable_entry size " << entry_size << " at index " << entry_idx
+ << " is too small.";
return false;
}
- if (entries_offset & 0x03) {
- LOG(ERROR) << "Entries start at unaligned address.";
+ if (entry_size > chunk_size || entry_offset > chunk_size - entry_size) {
+ LOG(ERROR) << "ResTable_entry size " << entry_size << " at index " << entry_idx
+ << " is too large.";
return false;
}
- // Check each entry offset.
- const uint32_t* offsets =
- reinterpret_cast<const uint32_t*>(reinterpret_cast<const uint8_t*>(header) + offsets_offset);
- for (size_t i = 0; i < entry_count; i++) {
- uint32_t offset = dtohl(offsets[i]);
- if (offset != ResTable_type::NO_ENTRY) {
- // Check that the offset is aligned.
- if (offset & 0x03) {
- LOG(ERROR) << "Entry offset at index " << i << " is not 4-byte aligned.";
- return false;
- }
-
- // Check that the offset doesn't overflow.
- if (offset > std::numeric_limits<uint32_t>::max() - entries_offset) {
- // Overflow in offset.
- LOG(ERROR) << "Entry offset at index " << i << " is too large.";
- return false;
- }
+ if (entry_size < sizeof(ResTable_map_entry)) {
+ // There needs to be room for one Res_value struct.
+ if (entry_offset + entry_size > chunk_size - sizeof(Res_value)) {
+ LOG(ERROR) << "No room for Res_value after ResTable_entry at index " << entry_idx
+ << " for type " << (int)type->id << ".";
+ return false;
+ }
- offset += entries_offset;
- if (offset > chunk.size() - sizeof(ResTable_entry)) {
- LOG(ERROR) << "Entry offset at index " << i << " is too large. No room for ResTable_entry.";
- return false;
- }
+ const Res_value* value =
+ reinterpret_cast<const Res_value*>(reinterpret_cast<const uint8_t*>(entry) + entry_size);
+ const size_t value_size = dtohs(value->size);
+ if (value_size < sizeof(Res_value)) {
+ LOG(ERROR) << "Res_value at index " << entry_idx << " is too small.";
+ return false;
+ }
- const ResTable_entry* entry = reinterpret_cast<const ResTable_entry*>(
- reinterpret_cast<const uint8_t*>(header) + offset);
- const size_t entry_size = dtohs(entry->size);
- if (entry_size < sizeof(*entry)) {
- LOG(ERROR) << "ResTable_entry size " << entry_size << " is too small.";
- return false;
- }
+ if (value_size > chunk_size || entry_offset + entry_size > chunk_size - value_size) {
+ LOG(ERROR) << "Res_value size " << value_size << " at index " << entry_idx
+ << " is too large.";
+ return false;
+ }
+ } else {
+ const ResTable_map_entry* map = reinterpret_cast<const ResTable_map_entry*>(entry);
+ const size_t map_entry_count = dtohl(map->count);
+ size_t map_entries_start = entry_offset + entry_size;
+ if (map_entries_start & 0x03) {
+ LOG(ERROR) << "Map entries at index " << entry_idx << " start at unaligned offset.";
+ return false;
+ }
- // Check the declared entrySize.
- if (entry_size > chunk.size() || offset > chunk.size() - entry_size) {
- LOG(ERROR) << "ResTable_entry size " << entry_size << " is too large.";
- return false;
- }
+ // Each entry is sizeof(ResTable_map) big.
+ if (map_entry_count > ((chunk_size - map_entries_start) / sizeof(ResTable_map))) {
+ LOG(ERROR) << "Too many map entries in ResTable_map_entry at index " << entry_idx << ".";
+ return false;
+ }
+ }
+ return true;
+}
- // If this is a map entry, then keep validating.
- if (entry_size >= sizeof(ResTable_map_entry)) {
- const ResTable_map_entry* map = reinterpret_cast<const ResTable_map_entry*>(entry);
- const size_t map_entry_count = dtohl(map->count);
+bool LoadedPackage::FindEntry(const TypeSpecPtr& type_spec_ptr, uint16_t entry_idx,
+ const ResTable_config& config, FindEntryResult* out_entry) const {
+ const ResTable_config* best_config = nullptr;
+ const ResTable_type* best_type = nullptr;
+ uint32_t best_offset = 0;
- size_t map_entries_start = offset + entry_size;
- if (map_entries_start & 0x03) {
- LOG(ERROR) << "Map entries start at unaligned offset.";
- return false;
- }
+ for (uint32_t i = 0; i < type_spec_ptr->type_count; i++) {
+ const Type* type = &type_spec_ptr->types[i];
+ const ResTable_type* type_chunk = type->type;
- // Each entry is sizeof(ResTable_map) big.
- if (map_entry_count > ((chunk.size() - map_entries_start) / sizeof(ResTable_map))) {
- LOG(ERROR) << "Too many map entries in ResTable_map_entry.";
- return false;
+ if (type->configuration.match(config) &&
+ (best_config == nullptr || type->configuration.isBetterThan(*best_config, &config))) {
+ // The configuration matches and is better than the previous selection.
+ // Find the entry value if it exists for this configuration.
+ const size_t entry_count = dtohl(type_chunk->entryCount);
+ const size_t offsets_offset = dtohs(type_chunk->header.headerSize);
+
+      // Check whether the desired entry exists in this type.
+
+ if (type_chunk->flags & ResTable_type::FLAG_SPARSE) {
+ // This is encoded as a sparse map, so perform a binary search.
+ const ResTable_sparseTypeEntry* sparse_indices =
+ reinterpret_cast<const ResTable_sparseTypeEntry*>(
+ reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
+ const ResTable_sparseTypeEntry* sparse_indices_end = sparse_indices + entry_count;
+ const ResTable_sparseTypeEntry* result =
+ std::lower_bound(sparse_indices, sparse_indices_end, entry_idx,
+ [](const ResTable_sparseTypeEntry& entry, uint16_t entry_idx) {
+ return dtohs(entry.idx) < entry_idx;
+ });
+
+ if (result == sparse_indices_end || dtohs(result->idx) != entry_idx) {
+ // No entry found.
+ continue;
}
- // Great, all the map entries fit!.
+ // Extract the offset from the entry. Each offset must be a multiple of 4 so we store it as
+ // the real offset divided by 4.
+ best_offset = uint32_t{dtohs(result->offset)} * 4u;
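+        // e.g. a stored sparse offset of 3 refers to byte offset 12 from entriesStart.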
} else {
- // There needs to be room for one Res_value struct.
- if (offset + entry_size > chunk.size() - sizeof(Res_value)) {
- LOG(ERROR) << "No room for Res_value after ResTable_entry.";
- return false;
+ if (entry_idx >= entry_count) {
+ // This entry cannot be here.
+ continue;
}
- const Res_value* value = reinterpret_cast<const Res_value*>(
- reinterpret_cast<const uint8_t*>(entry) + entry_size);
- const size_t value_size = dtohs(value->size);
- if (value_size < sizeof(Res_value)) {
- LOG(ERROR) << "Res_value is too small.";
- return false;
+ const uint32_t* entry_offsets = reinterpret_cast<const uint32_t*>(
+ reinterpret_cast<const uint8_t*>(type_chunk) + offsets_offset);
+ const uint32_t offset = dtohl(entry_offsets[entry_idx]);
+ if (offset == ResTable_type::NO_ENTRY) {
+ continue;
}
- if (value_size > chunk.size() || offset + entry_size > chunk.size() - value_size) {
- LOG(ERROR) << "Res_value size is too large.";
- return false;
- }
+        // There is an entry for this resource; record it.
+ best_offset = offset;
}
+
+ best_config = &type->configuration;
+ best_type = type_chunk;
}
}
+
+ if (best_type == nullptr) {
+ return false;
+ }
+
+ if (UNLIKELY(!VerifyResTableEntry(best_type, best_offset, entry_idx))) {
+ return false;
+ }
+
+ const ResTable_entry* best_entry = reinterpret_cast<const ResTable_entry*>(
+ reinterpret_cast<const uint8_t*>(best_type) + best_offset + dtohl(best_type->entriesStart));
+
+ const uint32_t* flags = reinterpret_cast<const uint32_t*>(type_spec_ptr->type_spec + 1);
+ out_entry->type_flags = dtohl(flags[entry_idx]);
+ out_entry->entry = best_entry;
+ out_entry->config = best_config;
+ out_entry->type_string_ref = StringPoolRef(&type_string_pool_, best_type->id - 1);
+ out_entry->entry_string_ref = StringPoolRef(&key_string_pool_, dtohl(best_entry->key.index));
return true;
}
+bool LoadedPackage::FindEntry(uint8_t type_idx, uint16_t entry_idx, const ResTable_config& config,
+ FindEntryResult* out_entry) const {
+ ATRACE_CALL();
+
+ // If the type IDs are offset in this package, we need to take that into account when searching
+ // for a type.
+ const TypeSpecPtr& ptr = type_specs_[type_idx - type_id_offset_];
+ if (UNLIKELY(ptr == nullptr)) {
+ return false;
+ }
+
+ // If there is an IDMAP supplied with this package, translate the entry ID.
+ if (ptr->idmap_entries != nullptr) {
+ if (!LoadedIdmap::Lookup(ptr->idmap_entries, entry_idx, &entry_idx)) {
+ // There is no mapping, so the resource is not meant to be in this overlay package.
+ return false;
+ }
+ }
+ return FindEntry(ptr, entry_idx, config, out_entry);
+}
+
void LoadedPackage::CollectConfigurations(bool exclude_mipmap,
std::set<ResTable_config>* out_configs) const {
const static std::u16string kMipMap = u"mipmap";
@@ -412,28 +433,51 @@ uint32_t LoadedPackage::FindEntryByName(const std::u16string& type_name,
return 0u;
}
-std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
+const LoadedPackage* LoadedArsc::GetPackageForId(uint32_t resid) const {
+ const uint8_t package_id = get_package_id(resid);
+ for (const auto& loaded_package : packages_) {
+ if (loaded_package->GetPackageId() == package_id) {
+ return loaded_package.get();
+ }
+ }
+ return nullptr;
+}
+
+std::unique_ptr<const LoadedPackage> LoadedPackage::Load(const Chunk& chunk,
+ const LoadedIdmap* loaded_idmap,
+ bool system, bool load_as_shared_library) {
ATRACE_CALL();
- std::unique_ptr<LoadedPackage> loaded_package{new LoadedPackage()};
+ std::unique_ptr<LoadedPackage> loaded_package(new LoadedPackage());
+ // typeIdOffset was added at some point, but we still must recognize apps built before this
+ // was added.
constexpr size_t kMinPackageSize =
sizeof(ResTable_package) - sizeof(ResTable_package::typeIdOffset);
const ResTable_package* header = chunk.header<ResTable_package, kMinPackageSize>();
if (header == nullptr) {
- LOG(ERROR) << "Chunk RES_TABLE_PACKAGE_TYPE is too small.";
+ LOG(ERROR) << "RES_TABLE_PACKAGE_TYPE too small.";
return {};
}
+ loaded_package->system_ = system;
+
loaded_package->package_id_ = dtohl(header->id);
- if (loaded_package->package_id_ == 0) {
+ if (loaded_package->package_id_ == 0 ||
+ (loaded_package->package_id_ == kAppPackageId && load_as_shared_library)) {
// Package ID of 0 means this is a shared library; an app package explicitly loaded
// as a shared library is treated the same way.
loaded_package->dynamic_ = true;
}
+ if (loaded_idmap != nullptr) {
+ // This is an overlay and so it needs to pretend to be the target package.
+ loaded_package->package_id_ = loaded_idmap->TargetPackageId();
+ loaded_package->overlay_ = true;
+ }
+
if (header->header.headerSize >= sizeof(ResTable_package)) {
uint32_t type_id_offset = dtohl(header->typeIdOffset);
if (type_id_offset > std::numeric_limits<uint8_t>::max()) {
- LOG(ERROR) << "Type ID offset in RES_TABLE_PACKAGE_TYPE is too large.";
+ LOG(ERROR) << "RES_TABLE_PACKAGE_TYPE type ID offset too large.";
return {};
}
loaded_package->type_id_offset_ = static_cast<int>(type_id_offset);
@@ -464,7 +508,7 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
status_t err = loaded_package->type_string_pool_.setTo(
child_chunk.header<ResStringPool_header>(), child_chunk.size());
if (err != NO_ERROR) {
- LOG(ERROR) << "Corrupt package type string pool.";
+ LOG(ERROR) << "RES_STRING_POOL_TYPE for types corrupt.";
return {};
}
} else if (pool_address == header_address + dtohl(header->keyStrings)) {
@@ -472,11 +516,11 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
status_t err = loaded_package->key_string_pool_.setTo(
child_chunk.header<ResStringPool_header>(), child_chunk.size());
if (err != NO_ERROR) {
- LOG(ERROR) << "Corrupt package key string pool.";
+ LOG(ERROR) << "RES_STRING_POOL_TYPE for keys corrupt.";
return {};
}
} else {
- LOG(WARNING) << "Too many string pool chunks found in package.";
+ LOG(WARNING) << "Too many RES_STRING_POOL_TYPEs found in RES_TABLE_PACKAGE_TYPE.";
}
} break;
@@ -490,7 +534,16 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
LOG(ERROR) << "Too many type configurations, overflow detected.";
return {};
}
- loaded_package->type_specs_.editItemAt(last_type_idx) = std::move(type_spec_ptr);
+
+ // We only add the type to the package if there is no IDMAP, or if the type is
+ // overlaying something.
+ if (loaded_idmap == nullptr || type_spec_ptr->idmap_entries != nullptr) {
+ // If this is an overlay, insert it at the target type ID.
+ if (type_spec_ptr->idmap_entries != nullptr) {
+ last_type_idx = dtohs(type_spec_ptr->idmap_entries->target_type_id) - 1;
+ }
+ loaded_package->type_specs_.editItemAt(last_type_idx) = std::move(type_spec_ptr);
+ }
types_builder = {};
last_type_idx = 0;
@@ -498,18 +551,18 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
const ResTable_typeSpec* type_spec = child_chunk.header<ResTable_typeSpec>();
if (type_spec == nullptr) {
- LOG(ERROR) << "Chunk RES_TABLE_TYPE_SPEC_TYPE is too small.";
+ LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE too small.";
return {};
}
if (type_spec->id == 0) {
- LOG(ERROR) << "Chunk RES_TABLE_TYPE_SPEC_TYPE has invalid ID 0.";
+ LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE has invalid ID 0.";
return {};
}
if (loaded_package->type_id_offset_ + static_cast<int>(type_spec->id) >
std::numeric_limits<uint8_t>::max()) {
- LOG(ERROR) << "Chunk RES_TABLE_TYPE_SPEC_TYPE has out of range ID.";
+ LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE has out of range ID.";
return {};
}
@@ -521,39 +574,41 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
// There can only be 2^16 entries in a type, because that is the ID
// space for entries (EEEE) in the resource ID 0xPPTTEEEE.
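// For example, resource ID 0x7f010002 refers to package 0x7f, type 0x01, entry 0x0002.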
if (entry_count > std::numeric_limits<uint16_t>::max()) {
- LOG(ERROR) << "Too many entries in RES_TABLE_TYPE_SPEC_TYPE: " << entry_count << ".";
+ LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE has too many entries (" << entry_count << ").";
return {};
}
if (entry_count * sizeof(uint32_t) > chunk.data_size()) {
- LOG(ERROR) << "Chunk too small to hold entries in RES_TABLE_TYPE_SPEC_TYPE.";
+ LOG(ERROR) << "RES_TABLE_TYPE_SPEC_TYPE too small to hold entries.";
return {};
}
last_type_idx = type_spec->id - 1;
- types_builder = util::make_unique<TypeSpecPtrBuilder>(type_spec);
+
+ // If this is an overlay, associate the mapping of this type to the target type
+ // from the IDMAP.
+ const IdmapEntry_header* idmap_entry_header = nullptr;
+ if (loaded_idmap != nullptr) {
+ idmap_entry_header = loaded_idmap->GetEntryMapForType(type_spec->id);
+ }
+
+ types_builder = util::make_unique<TypeSpecPtrBuilder>(type_spec, idmap_entry_header);
} break;
case RES_TABLE_TYPE_TYPE: {
const ResTable_type* type = child_chunk.header<ResTable_type, kResTableTypeMinSize>();
if (type == nullptr) {
- LOG(ERROR) << "Chunk RES_TABLE_TYPE_TYPE is too small.";
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE too small.";
return {};
}
- if (type->id == 0) {
- LOG(ERROR) << "Chunk RES_TABLE_TYPE_TYPE has invalid ID 0.";
+ if (!VerifyResTableType(type)) {
return {};
}
// Type chunks must be preceded by their TypeSpec chunks.
if (!types_builder || type->id - 1 != last_type_idx) {
- LOG(ERROR) << "Found RES_TABLE_TYPE_TYPE chunk without "
- "RES_TABLE_TYPE_SPEC_TYPE.";
- return {};
- }
-
- if (!VerifyType(child_chunk)) {
+ LOG(ERROR) << "RES_TABLE_TYPE_TYPE found without preceding RES_TABLE_TYPE_SPEC_TYPE.";
return {};
}
@@ -563,12 +618,12 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
case RES_TABLE_LIBRARY_TYPE: {
const ResTable_lib_header* lib = child_chunk.header<ResTable_lib_header>();
if (lib == nullptr) {
- LOG(ERROR) << "Chunk RES_TABLE_LIBRARY_TYPE is too small.";
+ LOG(ERROR) << "RES_TABLE_LIBRARY_TYPE too small.";
return {};
}
if (child_chunk.data_size() / sizeof(ResTable_lib_entry) < dtohl(lib->count)) {
- LOG(ERROR) << "Chunk too small to hold entries in RES_TABLE_LIBRARY_TYPE.";
+ LOG(ERROR) << "RES_TABLE_LIBRARY_TYPE too small to hold entries.";
return {};
}
@@ -608,21 +663,52 @@ std::unique_ptr<LoadedPackage> LoadedPackage::Load(const Chunk& chunk) {
LOG(ERROR) << "Too many type configurations, overflow detected.";
return {};
}
- loaded_package->type_specs_.editItemAt(last_type_idx) = std::move(type_spec_ptr);
+
+ // We only add the type to the package if there is no IDMAP, or if the type is
+ // overlaying something.
+ if (loaded_idmap == nullptr || type_spec_ptr->idmap_entries != nullptr) {
+ // If this is an overlay, insert it at the target type ID.
+ if (type_spec_ptr->idmap_entries != nullptr) {
+ last_type_idx = dtohs(type_spec_ptr->idmap_entries->target_type_id) - 1;
+ }
+ loaded_package->type_specs_.editItemAt(last_type_idx) = std::move(type_spec_ptr);
+ }
}
if (iter.HadError()) {
LOG(ERROR) << iter.GetLastError();
return {};
}
- return loaded_package;
+ return std::move(loaded_package);
}
-bool LoadedArsc::LoadTable(const Chunk& chunk, bool load_as_shared_library) {
+bool LoadedArsc::FindEntry(uint32_t resid, const ResTable_config& config,
+ FindEntryResult* out_entry) const {
+ ATRACE_CALL();
+
+ const uint8_t package_id = get_package_id(resid);
+ const uint8_t type_id = get_type_id(resid);
+ const uint16_t entry_id = get_entry_id(resid);
+
+ if (UNLIKELY(type_id == 0)) {
+ LOG(ERROR) << base::StringPrintf("Invalid ID 0x%08x.", resid);
+ return false;
+ }
+
+ for (const auto& loaded_package : packages_) {
+ if (loaded_package->GetPackageId() == package_id) {
+ return loaded_package->FindEntry(type_id - 1, entry_id, config, out_entry);
+ }
+ }
+ return false;
+}
+
+bool LoadedArsc::LoadTable(const Chunk& chunk, const LoadedIdmap* loaded_idmap,
+ bool load_as_shared_library) {
ATRACE_CALL();
const ResTable_header* header = chunk.header<ResTable_header>();
if (header == nullptr) {
- LOG(ERROR) << "Chunk RES_TABLE_TYPE is too small.";
+ LOG(ERROR) << "RES_TABLE_TYPE too small.";
return false;
}
@@ -641,33 +727,27 @@ bool LoadedArsc::LoadTable(const Chunk& chunk, bool load_as_shared_library) {
status_t err = global_string_pool_.setTo(child_chunk.header<ResStringPool_header>(),
child_chunk.size());
if (err != NO_ERROR) {
- LOG(ERROR) << "Corrupt string pool.";
+ LOG(ERROR) << "RES_STRING_POOL_TYPE corrupt.";
return false;
}
} else {
- LOG(WARNING) << "Multiple string pool chunks found in resource table.";
+ LOG(WARNING) << "Multiple RES_STRING_POOL_TYPEs found in RES_TABLE_TYPE.";
}
break;
case RES_TABLE_PACKAGE_TYPE: {
if (packages_seen + 1 > package_count) {
LOG(ERROR) << "More package chunks were found than the " << package_count
- << " declared in the "
- "header.";
+ << " declared in the header.";
return false;
}
packages_seen++;
- std::unique_ptr<LoadedPackage> loaded_package = LoadedPackage::Load(child_chunk);
+ std::unique_ptr<const LoadedPackage> loaded_package =
+ LoadedPackage::Load(child_chunk, loaded_idmap, system_, load_as_shared_library);
if (!loaded_package) {
return false;
}
-
- // Mark the package as dynamic if we are forcefully loading the Apk as a shared library.
- if (loaded_package->package_id_ == kAppPackageId) {
- loaded_package->dynamic_ = load_as_shared_library;
- }
- loaded_package->system_ = system_;
packages_.push_back(std::move(loaded_package));
} break;
@@ -684,7 +764,8 @@ bool LoadedArsc::LoadTable(const Chunk& chunk, bool load_as_shared_library) {
return true;
}
-std::unique_ptr<const LoadedArsc> LoadedArsc::Load(const void* data, size_t len, bool system,
+std::unique_ptr<const LoadedArsc> LoadedArsc::Load(const StringPiece& data,
+ const LoadedIdmap* loaded_idmap, bool system,
bool load_as_shared_library) {
ATRACE_CALL();
@@ -692,12 +773,12 @@ std::unique_ptr<const LoadedArsc> LoadedArsc::Load(const void* data, size_t len,
std::unique_ptr<LoadedArsc> loaded_arsc(new LoadedArsc());
loaded_arsc->system_ = system;
- ChunkIterator iter(data, len);
+ ChunkIterator iter(data.data(), data.size());
while (iter.HasNext()) {
const Chunk chunk = iter.Next();
switch (chunk.type()) {
case RES_TABLE_TYPE:
- if (!loaded_arsc->LoadTable(chunk, load_as_shared_library)) {
+ if (!loaded_arsc->LoadTable(chunk, loaded_idmap, load_as_shared_library)) {
return {};
}
break;
@@ -717,4 +798,8 @@ std::unique_ptr<const LoadedArsc> LoadedArsc::Load(const void* data, size_t len,
return std::move(loaded_arsc);
}
+std::unique_ptr<const LoadedArsc> LoadedArsc::CreateEmpty() {
+ return std::unique_ptr<LoadedArsc>(new LoadedArsc());
+}
+
} // namespace android
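
Usage sketch (not part of the change above): a minimal example of how the reworked
LoadedArsc API in this diff might be driven by a caller. It assumes that StringPiece,
ResTable_config and the FindEntryResult struct referenced in the diff are declared in
the androidfw headers pulled in by LoadedArsc.h; LookupExample is a hypothetical helper
written only for illustration.

#include <cstring>
#include <memory>

#include "androidfw/LoadedArsc.h"

// Loads a raw resource table blob and looks up a resource ID for the default
// configuration, using the Load()/FindEntry() signatures introduced in this diff.
static bool LookupExample(const android::StringPiece& arsc_data, uint32_t resid) {
  // No idmap, not a system table, not forced to load as a shared library.
  std::unique_ptr<const android::LoadedArsc> loaded_arsc = android::LoadedArsc::Load(
      arsc_data, /*loaded_idmap=*/nullptr, /*system=*/false,
      /*load_as_shared_library=*/false);
  if (loaded_arsc == nullptr) {
    return false;
  }

  android::ResTable_config config;
  std::memset(&config, 0, sizeof(config));  // Match the default (any) configuration.

  android::FindEntryResult entry;
  return loaded_arsc->FindEntry(resid, config, &entry);
}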