Diffstat (limited to 'scripts/update_payload/checker.py')
-rw-r--r--  scripts/update_payload/checker.py | 318
1 file changed, 80 insertions, 238 deletions
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 6d17fbe5..56a93708 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -24,6 +24,7 @@ follows:
   checker.Run(...)
 """
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 import array
@@ -34,22 +35,22 @@ import itertools
 import os
 import subprocess
 
+# pylint: disable=redefined-builtin
+from six.moves import range
+
 from update_payload import common
 from update_payload import error
 from update_payload import format_utils
 from update_payload import histogram
 from update_payload import update_metadata_pb2
 
-
 #
 # Constants.
 #
-_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
 _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
 _CHECK_PAYLOAD_SIG = 'payload-sig'
 
 CHECKS_TO_DISABLE = (
-    _CHECK_DST_PSEUDO_EXTENTS,
     _CHECK_MOVE_SAME_SRC_DST_BLOCK,
     _CHECK_PAYLOAD_SIG,
 )
@@ -66,15 +67,14 @@ _DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
 # Supported minor version map to payload types allowed to be using them.
 _SUPPORTED_MINOR_VERSIONS = {
     0: (_TYPE_FULL,),
-    1: (_TYPE_DELTA,),
     2: (_TYPE_DELTA,),
     3: (_TYPE_DELTA,),
     4: (_TYPE_DELTA,),
     5: (_TYPE_DELTA,),
     6: (_TYPE_DELTA,),
+    7: (_TYPE_DELTA,),
 }
 
-_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
 
 #
 # Helper functions.
@@ -323,8 +323,6 @@ class PayloadChecker(object):
     self.allow_unhashed = allow_unhashed
 
     # Disable specific tests.
-    self.check_dst_pseudo_extents = (
-        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
     self.check_move_same_src_dst_block = (
         _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
     self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -609,7 +607,7 @@ class PayloadChecker(object):
     """
     self.major_version = self.payload.header.version
 
-    part_sizes = collections.defaultdict(int, part_sizes)
+    part_sizes = part_sizes or collections.defaultdict(int)
     manifest = self.payload.manifest
     report.AddSection('manifest')
 
@@ -628,35 +626,23 @@ class PayloadChecker(object):
     self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                           'signatures_offset', 'signatures_size', 'manifest')
 
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
-            manifest, 'old_%s_info' % proto_name, report)
-        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
-            manifest, 'new_%s_info' % proto_name, report, 'manifest')
-
-      # Check: old_kernel_info <==> old_rootfs_info.
-      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
-                            self.old_part_info[common.ROOTFS].msg,
-                            'old_kernel_info', 'old_rootfs_info', 'manifest')
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        self.old_part_info[name] = self._CheckOptionalSubMsg(
-            part, 'old_partition_info', report)
-        self.new_part_info[name] = self._CheckMandatorySubMsg(
-            part, 'new_partition_info', report, 'manifest.partitions')
-
-      # Check: Old-style partition infos should not be specified.
-      for _, part in common.CROS_PARTITIONS:
-        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
-        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
-
-      # Check: If old_partition_info is specified anywhere, it must be
-      # specified everywhere.
-      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
-      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
-                                'manifest.partitions')
+    for part in manifest.partitions:
+      name = part.partition_name
+      self.old_part_info[name] = self._CheckOptionalSubMsg(
+          part, 'old_partition_info', report)
+      self.new_part_info[name] = self._CheckMandatorySubMsg(
+          part, 'new_partition_info', report, 'manifest.partitions')
+
+    # Check: Old-style partition infos should not be specified.
+    for _, part in common.CROS_PARTITIONS:
+      self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+      self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
+
+    # Check: If old_partition_info is specified anywhere, it must be
+    # specified everywhere.
+    old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+    self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+                              'manifest.partitions')
 
     is_delta = any(part and part.msg for part in self.old_part_info.values())
     if is_delta:
@@ -666,7 +652,7 @@ class PayloadChecker(object):
             'Apparent full payload contains old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_DELTA
 
-      for part, (msg, part_report) in self.old_part_info.iteritems():
+      for part, (msg, part_report) in self.old_part_info.items():
         # Check: {size, hash} present in old_{kernel,rootfs}_info.
         field = 'old_%s_info' % part
         self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
@@ -687,7 +673,7 @@ class PayloadChecker(object):
       self.payload_type = _TYPE_FULL
 
     # Check: new_{kernel,rootfs}_info present; contains {size, hash}.
-    for part, (msg, part_report) in self.new_part_info.iteritems():
+    for part, (msg, part_report) in self.new_part_info.items():
       field = 'new_%s_info' % part
       self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
                                                           part_report, field)
@@ -724,8 +710,7 @@ class PayloadChecker(object):
     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
                                '%s: %s' % (op_name, length_name))
 
-  def _CheckExtents(self, extents, usable_size, block_counters, name,
-                    allow_pseudo=False, allow_signature=False):
+  def _CheckExtents(self, extents, usable_size, block_counters, name):
     """Checks a sequence of extents.
 
     Args:
@@ -733,8 +718,6 @@
       extents: The sequence of extents to check.
       usable_size: The usable size of the partition to which the extents apply.
       block_counters: Array of counters corresponding to the number of blocks.
       name: The name of the extent block.
-      allow_pseudo: Whether or not pseudo block numbers are allowed.
-      allow_signature: Whether or not the extents are used for a signature.
 
     Returns:
       The total number of blocks in the extents.
@@ -755,20 +738,15 @@
       if num_blocks == 0:
         raise error.PayloadError('%s: extent length is zero.' % ex_name)
 
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        # Check: Make sure we're within the partition limit.
-        if usable_size and end_block * self.block_size > usable_size:
-          raise error.PayloadError(
-              '%s: extent (%s) exceeds usable partition size (%d).' %
-              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+      # Check: Make sure we're within the partition limit.
+      if usable_size and end_block * self.block_size > usable_size:
+        raise error.PayloadError(
+            '%s: extent (%s) exceeds usable partition size (%d).' %
+            (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
 
-      # Record block usage.
-      for i in xrange(start_block, end_block):
-        block_counters[i] += 1
-      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
-        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
-        # signature operation (in which case there has to be exactly one).
-        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+      # Record block usage.
+      for i in range(start_block, end_block):
+        block_counters[i] += 1
 
       total_num_blocks += num_blocks
 
@@ -786,6 +764,11 @@
     Raises:
      error.PayloadError if any check fails.
    """
+    # Check: total_dst_blocks is not a floating point.
+    if isinstance(total_dst_blocks, float):
+      raise error.PayloadError('%s: contains invalid data type of '
+                               'total_dst_blocks.' % op_name)
+
     # Check: Does not contain src extents.
     if op.src_extents:
       raise error.PayloadError('%s: contains src_extents.' % op_name)
@@ -806,89 +789,6 @@
           'space (%d * %d).' % (op_name, data_length, total_dst_blocks,
                                 self.block_size))
 
-  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
-                          total_dst_blocks, op_name):
-    """Specific checks for MOVE operations.
-
-    Args:
-      op: The operation object from the manifest.
-      data_offset: The offset of a data blob for the operation.
-      total_src_blocks: Total number of blocks in src_extents.
-      total_dst_blocks: Total number of blocks in dst_extents.
-      op_name: Operation name for error reporting.
-
-    Raises:
-      error.PayloadError if any check fails.
-    """
-    # Check: No data_{offset,length}.
-    if data_offset is not None:
-      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
-
-    # Check: total_src_blocks == total_dst_blocks.
-    if total_src_blocks != total_dst_blocks:
-      raise error.PayloadError(
-          '%s: total src blocks (%d) != total dst blocks (%d).' %
-          (op_name, total_src_blocks, total_dst_blocks))
-
-    # Check: For all i, i-th src block index != i-th dst block index.
-    i = 0
-    src_extent_iter = iter(op.src_extents)
-    dst_extent_iter = iter(op.dst_extents)
-    src_extent = dst_extent = None
-    src_idx = src_num = dst_idx = dst_num = 0
-    while i < total_src_blocks:
-      # Get the next source extent, if needed.
-      if not src_extent:
-        try:
-          src_extent = src_extent_iter.next()
-        except StopIteration:
-          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
-                                   (op_name, i, total_src_blocks))
-        src_idx = src_extent.start_block
-        src_num = src_extent.num_blocks
-
-      # Get the next dest extent, if needed.
-      if not dst_extent:
-        try:
-          dst_extent = dst_extent_iter.next()
-        except StopIteration:
-          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
-                                   (op_name, i, total_dst_blocks))
-        dst_idx = dst_extent.start_block
-        dst_num = dst_extent.num_blocks
-
-      # Check: start block is not 0. See crbug/480751; there are still versions
-      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
-      # so we need to fail payloads that try to MOVE to/from block 0.
-      if src_idx == 0 or dst_idx == 0:
-        raise error.PayloadError(
-            '%s: MOVE operation cannot have extent with start block 0' %
-            op_name)
-
-      if self.check_move_same_src_dst_block and src_idx == dst_idx:
-        raise error.PayloadError(
-            '%s: src/dst block number %d is the same (%d).' %
-            (op_name, i, src_idx))
-
-      advance = min(src_num, dst_num)
-      i += advance
-
-      src_idx += advance
-      src_num -= advance
-      if src_num == 0:
-        src_extent = None
-
-      dst_idx += advance
-      dst_num -= advance
-      if dst_num == 0:
-        dst_extent = None
-
-    # Make sure we've exhausted all src/dst extents.
-    if src_extent:
-      raise error.PayloadError('%s: excess src blocks.' % op_name)
-    if dst_extent:
-      raise error.PayloadError('%s: excess dst blocks.' % op_name)
-
   def _CheckZeroOperation(self, op, op_name):
     """Specific checks for ZERO operations.
 
@@ -908,7 +808,7 @@
       raise error.PayloadError('%s: contains data_offset.' % op_name)
 
   def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
-    """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
+    """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
     operations.
 
     Args:
@@ -933,8 +833,7 @@
                                  total_dst_blocks * self.block_size))
 
     # Check the existence of src_length and dst_length for legacy bsdiffs.
-    if (op.type == common.OpType.BSDIFF or
-        (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
+    if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3:
      if not op.HasField('src_length') or not op.HasField('dst_length'):
        raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
     else:
@@ -983,21 +882,19 @@
     if self.minor_version >= 3 and op.src_sha256_hash is None:
       raise error.PayloadError('%s: source hash missing.' % op_name)
 
-  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
-                      new_block_counters, old_usable_size, new_usable_size,
-                      prev_data_offset, allow_signature, blob_hash_counts):
+  def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
+                      old_usable_size, new_usable_size, prev_data_offset,
+                      blob_hash_counts):
     """Checks a single update operation.
 
     Args:
       op: The operation object.
       op_name: Operation name string for error reporting.
-      is_last: Whether this is the last operation in the sequence.
       old_block_counters: Arrays of block read counters.
       new_block_counters: Arrays of block write counters.
       old_usable_size: The overall usable size for src data in bytes.
       new_usable_size: The overall usable size for dst data in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this may be a signature operation.
      blob_hash_counts: Counters for hashed/unhashed blobs.
 
     Returns:
@@ -1009,14 +906,10 @@
     # Check extents.
     total_src_blocks = self._CheckExtents(
        op.src_extents, old_usable_size, old_block_counters,
-        op_name + '.src_extents', allow_pseudo=True)
-    allow_signature_in_extents = (allow_signature and is_last and
-                                  op.type == common.OpType.REPLACE)
+        op_name + '.src_extents')
     total_dst_blocks = self._CheckExtents(
         op.dst_extents, new_usable_size, new_block_counters,
-        op_name + '.dst_extents',
-        allow_pseudo=(not self.check_dst_pseudo_extents),
-        allow_signature=allow_signature_in_extents)
+        op_name + '.dst_extents')
 
     # Check: data_offset present <==> data_length present.
     data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -1052,9 +945,7 @@
               (op_name, common.FormatSha256(op.data_sha256_hash),
                common.FormatSha256(actual_hash.digest())))
     elif data_offset is not None:
-      if allow_signature_in_extents:
-        blob_hash_counts['signature'] += 1
-      elif self.allow_unhashed:
+      if self.allow_unhashed:
         blob_hash_counts['unhashed'] += 1
       else:
         raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -1068,19 +959,11 @@
                                  (op_name, data_offset, prev_data_offset))
 
     # Type-specific checks.
-    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
-      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
-    elif (op.type == common.OpType.REPLACE_XZ and
-          (self.minor_version >= 3 or
-           self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
+    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                   common.OpType.REPLACE_XZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
-    elif op.type == common.OpType.MOVE and self.minor_version == 1:
-      self._CheckMoveOperation(op, data_offset, total_src_blocks,
-                               total_dst_blocks, op_name)
     elif op.type == common.OpType.ZERO and self.minor_version >= 4:
       self._CheckZeroOperation(op, op_name)
-    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
-      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
     elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
       self._CheckSourceCopyOperation(data_offset, total_src_blocks,
                                      total_dst_blocks, op_name)
@@ -1102,7 +985,7 @@
 
   def _SizeToNumBlocks(self, size):
     """Returns the number of blocks needed to contain a given byte size."""
-    return (size + self.block_size - 1) / self.block_size
+    return (size + self.block_size - 1) // self.block_size
 
   def _AllocBlockCounters(self, total_size):
     """Returns a freshly initialized array of block counters.
@@ -1122,7 +1005,7 @@
 
   def _CheckOperations(self, operations, report, base_name, old_fs_size,
                        new_fs_size, old_usable_size, new_usable_size,
-                       prev_data_offset, allow_signature):
+                       prev_data_offset):
     """Checks a sequence of update operations.
 
     Args:
@@ -1134,7 +1017,6 @@
       old_usable_size: The overall usable size of the old partition in bytes.
       new_usable_size: The overall usable size of the new partition in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this sequence may contain signature operations.
 
     Returns:
       The total data blob size used.
@@ -1149,9 +1031,7 @@
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
         common.OpType.REPLACE_XZ: 0,
-        common.OpType.MOVE: 0,
         common.OpType.ZERO: 0,
-        common.OpType.BSDIFF: 0,
         common.OpType.SOURCE_COPY: 0,
         common.OpType.SOURCE_BSDIFF: 0,
         common.OpType.PUFFDIFF: 0,
@@ -1162,8 +1042,6 @@
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
         common.OpType.REPLACE_XZ: 0,
-        # MOVE operations don't have blobs.
-        common.OpType.BSDIFF: 0,
         # SOURCE_COPY operations don't have blobs.
         common.OpType.SOURCE_BSDIFF: 0,
         common.OpType.PUFFDIFF: 0,
@@ -1174,8 +1052,6 @@
         'hashed': 0,
         'unhashed': 0,
     }
-    if allow_signature:
-      blob_hash_counts['signature'] = 0
 
     # Allocate old and new block counters.
     old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1188,16 +1064,14 @@
       op_num += 1
 
       # Check: Type is valid.
-      if op.type not in op_counts.keys():
+      if op.type not in op_counts:
         raise error.PayloadError('%s: invalid type (%d).'
                                  % (op_name, op.type))
       op_counts[op.type] += 1
 
-      is_last = op_num == len(operations)
       curr_data_used = self._CheckOperation(
-          op, op_name, is_last, old_block_counters, new_block_counters,
+          op, op_name, old_block_counters, new_block_counters,
           old_usable_size, new_usable_size,
-          prev_data_offset + total_data_used, allow_signature,
-          blob_hash_counts)
+          prev_data_offset + total_data_used, blob_hash_counts)
       if curr_data_used:
         op_blob_totals[op.type] += curr_data_used
         total_data_used += curr_data_used
@@ -1251,21 +1125,17 @@
     if not sigs.signatures:
       raise error.PayloadError('Signature block is empty.')
 
-    last_ops_section = (self.payload.manifest.kernel_install_operations or
-                        self.payload.manifest.install_operations)
-
-    # Only major version 1 has the fake signature OP at the end.
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      fake_sig_op = last_ops_section[-1]
+    # Check that we don't have the signature operation blob at the end (used to
+    # be for major version 1).
+    last_partition = self.payload.manifest.partitions[-1]
+    if last_partition.operations:
+      last_op = last_partition.operations[-1]
       # Check: signatures_{offset,size} must match the last (fake) operation.
-      if not (fake_sig_op.type == common.OpType.REPLACE and
-              self.sigs_offset == fake_sig_op.data_offset and
-              self.sigs_size == fake_sig_op.data_length):
-        raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
-                                 ' match last operation (%d+%d).' %
-                                 (self.sigs_offset, self.sigs_size,
-                                  fake_sig_op.data_offset,
-                                  fake_sig_op.data_length))
+      if (last_op.type == common.OpType.REPLACE and
+          last_op.data_offset == self.sigs_offset and
+          last_op.data_length == self.sigs_size):
+        raise error.PayloadError('It seems like the last operation is the '
+                                 'signature blob. This is an invalid payload.')
 
     # Compute the checksum of all data up to signature blob.
     # TODO(garnold) we're re-reading the whole data section into a string
@@ -1280,17 +1150,13 @@
       sig_report = report.AddSubReport(sig_name)
 
       # Check: Signature contains mandatory fields.
-      self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
       self._CheckMandatoryField(sig, 'data', None, sig_name)
       sig_report.AddField('data len', len(sig.data))
 
       # Check: Signatures pertains to actual payload hash.
-      if sig.version == 1:
+      if sig.data:
         self._CheckSha256Signature(sig.data, pubkey_file_name,
                                    payload_hasher.digest(), sig_name)
-      else:
-        raise error.PayloadError('Unknown signature version (%d).' %
-                                 sig.version)
 
   def Run(self, pubkey_file_name=None, metadata_sig_file=None,
           metadata_size=0, part_sizes=None, report_out_file=None):
@@ -1344,62 +1210,38 @@
       self._CheckManifest(report, part_sizes)
       assert self.payload_type, 'payload type should be known by now'
 
-      manifest = self.payload.manifest
-
-      # Part 3: Examine partition operations.
-      install_operations = []
-      if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-        # partitions field should not ever exist in major version 1 payloads
-        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
-
-        install_operations.append((common.ROOTFS, manifest.install_operations))
-        install_operations.append((common.KERNEL,
-                                   manifest.kernel_install_operations))
-
-      else:
-        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+      # Make sure deprecated values are not present in the payload.
+      for field in ('install_operations', 'kernel_install_operations'):
+        self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
                                           'manifest')
-        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
-                                          'manifest')
-
-        for update in manifest.partitions:
-          install_operations.append((update.partition_name, update.operations))
+      for field in ('old_kernel_info', 'old_rootfs_info',
+                    'new_kernel_info', 'new_rootfs_info'):
+        self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')
 
       total_blob_size = 0
-      for part, operations in install_operations:
+      for part, operations in ((p.partition_name, p.operations)
+                               for p in self.payload.manifest.partitions):
         report.AddSection('%s operations' % part)
 
         new_fs_usable_size = self.new_fs_sizes[part]
         old_fs_usable_size = self.old_fs_sizes[part]
 
-        if part_sizes.get(part, None):
+        if part_sizes is not None and part_sizes.get(part, None):
           new_fs_usable_size = old_fs_usable_size = part_sizes[part]
-        # Infer the usable partition size when validating rootfs operations:
-        # - If rootfs partition size was provided, use that.
-        # - Otherwise, if this is an older delta (minor version < 2), stick with
-        #   a known constant size. This is necessary because older deltas may
-        #   exceed the filesystem size when moving data blocks around.
-        # - Otherwise, use the encoded filesystem size.
-        elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \
-             self.minor_version in (None, 1):
-          new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
-
-        # TODO(garnold)(chromium:243559) only default to the filesystem size if
-        # no explicit size provided *and* the partition size is not embedded in
-        # the payload; see issue for more details.
+
+        # TODO(chromium:243559) only default to the filesystem size if no
+        # explicit size provided *and* the partition size is not embedded in the
+        # payload; see issue for more details.
         total_blob_size += self._CheckOperations(
             operations, report, '%s_install_operations' % part,
             self.old_fs_sizes[part], self.new_fs_sizes[part],
-            old_fs_usable_size, new_fs_usable_size, total_blob_size,
-            (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
-             and part == common.KERNEL))
+            old_fs_usable_size, new_fs_usable_size, total_blob_size)
 
       # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
       # Major versions 2 and higher have a signature at the end, so it should be
       # considered in the total size of the image.
-      if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
-          self.sigs_size):
+      if self.sigs_size:
         used_payload_size += self.sigs_size
 
       if used_payload_size != payload_file_size:
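
Note on the part_sizes change in _CheckManifest: the old form,
collections.defaultdict(int, part_sizes), raises TypeError when part_sizes is
None, which is exactly what Run() passes by default, since a dict cannot be
built from None. The new form falls back to an empty defaultdict instead. A
minimal standalone sketch of the two behaviors; pick_part_sizes is a
hypothetical stand-in for the inline expression:

    import collections

    def pick_part_sizes(part_sizes=None):
      # collections.defaultdict(int, part_sizes) would raise TypeError for the
      # default None; 'or' substitutes an empty defaultdict in that case.
      return part_sizes or collections.defaultdict(int)

    assert pick_part_sizes()['root'] == 0  # missing parts count as zero
    assert pick_part_sizes({'root': 4096})['root'] == 4096

Because a caller-provided plain dict is used as-is, Run() still guards its own
lookups with part_sizes is not None and part_sizes.get(part, None), as seen in
the last hunk above.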
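The _SizeToNumBlocks change from / to // is a Python 3 correctness fix rather
than a cleanup: true division returns a float on Python 3, and block counts
must stay integral, which is presumably also why the isinstance(..., float)
guard was added in _CheckReplaceOperation. A minimal sketch of the
ceiling-division idiom, with BLOCK_SIZE standing in for self.block_size:

    BLOCK_SIZE = 4096

    def size_to_num_blocks(size):
      # Ceiling division: whole blocks needed to contain `size` bytes; '//'
      # keeps the result an int on both Python 2 and Python 3.
      return (size + BLOCK_SIZE - 1) // BLOCK_SIZE

    assert size_to_num_blocks(1) == 1
    assert size_to_num_blocks(4096) == 1
    assert size_to_num_blocks(4097) == 2  # plain '/' would yield 2.0 on Python 3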
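The six.moves import (with its pylint redefined-builtin waiver) keeps the
extent-tally loop lazy on both interpreters: it resolves to xrange on Python 2
and to the built-in range on Python 3, so iterating a huge extent never
materializes a list. A sketch of the loop it feeds, assuming the six package
from the new import; record_block_usage is a hypothetical extraction of the
inline code in _CheckExtents:

    # pylint: disable=redefined-builtin
    from six.moves import range

    def record_block_usage(block_counters, start_block, end_block):
      # Lazy on Python 2 (xrange) and Python 3 (range) alike, so tallying a
      # multi-gigabyte extent costs O(1) memory.
      for i in range(start_block, end_block):
        block_counters[i] += 1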
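With the major-version-1 branch gone from _CheckManifest, delta-vs-full
detection now rests entirely on the all-or-nothing rule for
old_partition_info. The core semantics of _CheckPresentIffMany reduce to the
sketch below; check_present_iff_many is a hypothetical simplification, and the
real helper also takes report and object-name arguments:

    def check_present_iff_many(msgs, name):
      # Either every partition carries old_partition_info (delta payload) or
      # none does (full payload); a mix is a malformed manifest.
      if any(msgs) and not all(msgs):
        raise ValueError('%s must be present for all or none of the '
                         'partitions' % name)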