Diffstat (limited to 'startop/scripts/app_startup')
19 files changed, 2604 insertions, 314 deletions
diff --git a/startop/scripts/app_startup/app_startup_runner.py b/startop/scripts/app_startup/app_startup_runner.py index 780bb4eaeeef..25ee6f7368c8 100755 --- a/startop/scripts/app_startup/app_startup_runner.py +++ b/startop/scripts/app_startup/app_startup_runner.py @@ -30,131 +30,117 @@ import argparse import csv import itertools import os -import subprocess import sys import tempfile -from typing import Any, Callable, Dict, Generic, Iterable, List, NamedTuple, TextIO, Tuple, TypeVar, Optional, Union +from datetime import timedelta +from typing import Any, Callable, Iterable, List, NamedTuple, TextIO, Tuple, \ + TypeVar, Union, Optional + +# local import +DIR = os.path.abspath(os.path.dirname(__file__)) +sys.path.append(os.path.dirname(DIR)) +import lib.cmd_utils as cmd_utils +import lib.print_utils as print_utils +from app_startup.run_app_with_prefetch import PrefetchAppRunner +import app_startup.lib.args_utils as args_utils +from app_startup.lib.data_frame import DataFrame +from app_startup.lib.perfetto_trace_collector import PerfettoTraceCollector +from iorap.compiler import CompilerType +import iorap.compiler as compiler # The following command line options participate in the combinatorial generation. # All other arguments have a global effect. -_COMBINATORIAL_OPTIONS=['packages', 'readaheads', 'compiler_filters'] -_TRACING_READAHEADS=['mlock', 'fadvise'] -_FORWARD_OPTIONS={'loop_count': '--count'} -_RUN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'run_app_with_prefetch') - -RunCommandArgs = NamedTuple('RunCommandArgs', [('package', str), ('readahead', str), ('compiler_filter', Optional[str])]) -CollectorPackageInfo = NamedTuple('CollectorPackageInfo', [('package', str), ('compiler_filter', str)]) -_COLLECTOR_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'collector') -_COLLECTOR_TIMEOUT_MULTIPLIER = 2 # take the regular --timeout and multiply by 2; systrace starts up slowly. - -_UNLOCK_SCREEN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'unlock_screen') +_COMBINATORIAL_OPTIONS = ['package', 'readahead', 'compiler_filter', + 'activity', 'trace_duration'] +_TRACING_READAHEADS = ['mlock', 'fadvise'] +_FORWARD_OPTIONS = {'loop_count': '--count'} +_RUN_SCRIPT = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'run_app_with_prefetch.py') + +CollectorPackageInfo = NamedTuple('CollectorPackageInfo', + [('package', str), ('compiler_filter', str)]) +# by 2; systrace starts up slowly. + +_UNLOCK_SCREEN_SCRIPT = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'unlock_screen') + +RunCommandArgs = NamedTuple('RunCommandArgs', + [('package', str), + ('readahead', str), + ('activity', Optional[str]), + ('compiler_filter', Optional[str]), + ('timeout', Optional[int]), + ('debug', bool), + ('simulate', bool), + ('input', Optional[str]), + ('trace_duration', Optional[timedelta])]) # This must be the only mutable global variable. All other global variables are constants to avoid magic literals. _debug = False # See -d/--debug flag. _DEBUG_FORCE = None # Ignore -d/--debug if this is not none. +_PERFETTO_TRACE_DURATION_MS = 5000 # milliseconds +_PERFETTO_TRACE_DURATION = timedelta(milliseconds=_PERFETTO_TRACE_DURATION_MS) # Type hinting names. T = TypeVar('T') -NamedTupleMeta = Callable[..., T] # approximation of a (S : NamedTuple<T> where S() == T) metatype. +NamedTupleMeta = Callable[ + ..., T] # approximation of a (S : NamedTuple<T> where S() == T) metatype. 
def parse_options(argv: List[str] = None): """Parse command line arguments and return an argparse Namespace object.""" - parser = argparse.ArgumentParser(description="Run one or more Android applications under various settings in order to measure startup time.") + parser = argparse.ArgumentParser(description="Run one or more Android " + "applications under various " + "settings in order to measure " + "startup time.") # argparse considers args starting with - and -- optional in --help, even though required=True. # by using a named argument group --help will clearly say that it's required instead of optional. required_named = parser.add_argument_group('required named arguments') - required_named.add_argument('-p', '--package', action='append', dest='packages', help='package of the application', required=True) - required_named.add_argument('-r', '--readahead', action='append', dest='readaheads', help='which readahead mode to use', choices=('warm', 'cold', 'mlock', 'fadvise'), required=True) + required_named.add_argument('-p', '--package', action='append', + dest='packages', + help='package of the application', required=True) + required_named.add_argument('-r', '--readahead', action='append', + dest='readaheads', + help='which readahead mode to use', + choices=('warm', 'cold', 'mlock', 'fadvise'), + required=True) # optional arguments # use a group here to get the required arguments to appear 'above' the optional arguments in help. optional_named = parser.add_argument_group('optional named arguments') - optional_named.add_argument('-c', '--compiler-filter', action='append', dest='compiler_filters', help='which compiler filter to use. if omitted it does not enforce the app\'s compiler filter', choices=('speed', 'speed-profile', 'quicken')) - optional_named.add_argument('-s', '--simulate', dest='simulate', action='store_true', help='Print which commands will run, but don\'t run the apps') - optional_named.add_argument('-d', '--debug', dest='debug', action='store_true', help='Add extra debugging output') - optional_named.add_argument('-o', '--output', dest='output', action='store', help='Write CSV output to file.') - optional_named.add_argument('-t', '--timeout', dest='timeout', action='store', type=int, help='Timeout after this many seconds when executing a single run.') - optional_named.add_argument('-lc', '--loop-count', dest='loop_count', default=1, type=int, action='store', help='How many times to loop a single run.') - optional_named.add_argument('-in', '--inodes', dest='inodes', type=str, action='store', help='Path to inodes file (system/extras/pagecache/pagecache.py -d inodes)') + optional_named.add_argument('-c', '--compiler-filter', action='append', + dest='compiler_filters', + help='which compiler filter to use. 
if omitted it does not enforce the app\'s compiler filter', + choices=('speed', 'speed-profile', 'quicken')) + optional_named.add_argument('-s', '--simulate', dest='simulate', + action='store_true', + help='Print which commands will run, but don\'t run the apps') + optional_named.add_argument('-d', '--debug', dest='debug', + action='store_true', + help='Add extra debugging output') + optional_named.add_argument('-o', '--output', dest='output', action='store', + help='Write CSV output to file.') + optional_named.add_argument('-t', '--timeout', dest='timeout', action='store', + type=int, default=10, + help='Timeout after this many seconds when executing a single run.') + optional_named.add_argument('-lc', '--loop-count', dest='loop_count', + default=1, type=int, action='store', + help='How many times to loop a single run.') + optional_named.add_argument('-in', '--inodes', dest='inodes', type=str, + action='store', + help='Path to inodes file (system/extras/pagecache/pagecache.py -d inodes)') + optional_named.add_argument('--compiler-trace-duration-ms', + dest='trace_duration', + type=lambda ms_str: timedelta(milliseconds=int(ms_str)), + action='append', + help='The trace duration (milliseconds) in ' + 'compilation') + optional_named.add_argument('--compiler-type', dest='compiler_type', + type=CompilerType, choices=list(CompilerType), + default=CompilerType.DEVICE, + help='The type of compiler.') return parser.parse_args(argv) -# TODO: refactor this with a common library file with analyze_metrics.py -def _debug_print(*args, **kwargs): - """Print the args to sys.stderr if the --debug/-d flag was passed in.""" - if _debug: - print(*args, **kwargs, file=sys.stderr) - -def _expand_gen_repr(args): - """Like repr but any generator-like object has its iterator consumed - and then called repr on.""" - new_args_list = [] - for i in args: - # detect iterable objects that do not have their own override of __str__ - if hasattr(i, '__iter__'): - to_str = getattr(i, '__str__') - if to_str.__objclass__ == object: - # the repr for a generator is just type+address, expand it out instead. - new_args_list.append([_expand_gen_repr([j])[0] for j in i]) - continue - # normal case: uses the built-in to-string - new_args_list.append(i) - return new_args_list - -def _debug_print_gen(*args, **kwargs): - """Like _debug_print but will turn any iterable args into a list.""" - if not _debug: - return - - new_args_list = _expand_gen_repr(args) - _debug_print(*new_args_list, **kwargs) - -def _debug_print_nd(*args, **kwargs): - """Like _debug_print but will turn any NamedTuple-type args into a string.""" - if not _debug: - return - - new_args_list = [] - for i in args: - if hasattr(i, '_field_types'): - new_args_list.append("%s: %s" %(i.__name__, i._field_types)) - else: - new_args_list.append(i) - - _debug_print(*new_args_list, **kwargs) - -def dict_lookup_any_key(dictionary: dict, *keys: List[Any]): - for k in keys: - if k in dictionary: - return dictionary[k] - raise KeyError("None of the keys %s were in the dictionary" %(keys)) - -def generate_run_combinations(named_tuple: NamedTupleMeta[T], opts_dict: Dict[str, List[Optional[str]]])\ - -> Iterable[T]: - """ - Create all possible combinations given the values in opts_dict[named_tuple._fields]. - - :type T: type annotation for the named_tuple type. - :param named_tuple: named tuple type, whose fields are used to make combinations for - :param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields. 
- :return: an iterable over named_tuple instances. - """ - combinations_list = [] - for k in named_tuple._fields: - # the key can be either singular or plural , e.g. 'package' or 'packages' - val = dict_lookup_any_key(opts_dict, k, k + "s") - - # treat {'x': None} key value pairs as if it was [None] - # otherwise itertools.product throws an exception about not being able to iterate None. - combinations_list.append(val or [None]) - - _debug_print("opts_dict: ", opts_dict) - _debug_print_nd("named_tuple: ", named_tuple) - _debug_print("combinations_list: ", combinations_list) - - for combo in itertools.product(*combinations_list): - yield named_tuple(*combo) - def key_to_cmdline_flag(key: str) -> str: """Convert key into a command line flag, e.g. 'foo-bars' -> '--foo-bar' """ if key.endswith("s"): @@ -175,120 +161,194 @@ def as_run_command(tpl: NamedTuple) -> List[Union[str, Any]]: args.append(value) return args -def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], dst_nt: NamedTupleMeta[T])\ - -> Iterable[Tuple[T, Iterable[NamedTuple]]]: +def run_perfetto_collector(collector_info: CollectorPackageInfo, + timeout: int, + simulate: bool) -> Tuple[bool, TextIO]: + """Run collector to collect prefetching trace. - def group_by_keys(src_nt): - src_d = src_nt._asdict() - # now remove the keys that aren't legal in dst. - for illegal_key in set(src_d.keys()) - set(dst_nt._fields): - if illegal_key in src_d: - del src_d[illegal_key] + Returns: + A tuple of whether the collection succeeds and the generated trace file. + """ + tmp_output_file = tempfile.NamedTemporaryFile() - return dst_nt(**src_d) + collector = PerfettoTraceCollector(package=collector_info.package, + activity=None, + compiler_filter=collector_info.compiler_filter, + timeout=timeout, + simulate=simulate, + trace_duration=_PERFETTO_TRACE_DURATION, + save_destination_file_path=tmp_output_file.name) + result = collector.run() - for args_list_it in itertools.groupby(run_combinations, group_by_keys): - (group_key_value, args_it) = args_list_it - yield (group_key_value, args_it) + return result is not None, tmp_output_file -def parse_run_script_csv_file(csv_file: TextIO) -> List[int]: - """Parse a CSV file full of integers into a flat int list.""" +def parse_run_script_csv_file(csv_file: TextIO) -> DataFrame: + """Parse a CSV file full of integers into a DataFrame.""" csv_reader = csv.reader(csv_file) - arr = [] + + try: + header_list = next(csv_reader) + except StopIteration: + header_list = [] + + if not header_list: + return None + + headers = [i for i in header_list] + + d = {} for row in csv_reader: + header_idx = 0 + for i in row: + v = i if i: - arr.append(int(i)) - return arr - -def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)\ - -> Tuple[str, TextIO]: - """ - Create a command to run a script given the args. - Appends --count <loop_count> --output <tmp-file-name>. 
- Returns a tuple (cmd, tmp_file) - """ - tmp_output_file = tempfile.NamedTemporaryFile(mode='r') - cmd = [script] + args - for key, value in kwargs.items(): - cmd += ['--%s' %(key), "%s" %(value)] - if _debug: - cmd += ['--verbose'] - cmd = cmd + ["--output", tmp_output_file.name] - return cmd, tmp_output_file - -def execute_arbitrary_command(cmd: List[str], simulate: bool, timeout: int) -> Tuple[bool, str]: - if simulate: - print(" ".join(cmd)) - return (True, "") - else: - _debug_print("[EXECUTE]", cmd) - proc = subprocess.Popen(cmd, - stderr=subprocess.STDOUT, - stdout=subprocess.PIPE, - universal_newlines=True) - try: - script_output = proc.communicate(timeout=timeout)[0] - except subprocess.TimeoutExpired: - print("[TIMEDOUT]") - proc.kill() - script_output = proc.communicate()[0] - - _debug_print("[STDOUT]", script_output) - return_code = proc.wait() - passed = (return_code == 0) - _debug_print("[$?]", return_code) + v = int(i) + + header_key = headers[header_idx] + l = d.get(header_key, []) + l.append(v) + d[header_key] = l + + header_idx = header_idx + 1 + + return DataFrame(d) + +def build_ri_compiler_argv(inodes_path: str, + perfetto_trace_file: str, + trace_duration: Optional[timedelta] + ) -> str: + argv = ['-i', inodes_path, '--perfetto-trace', + perfetto_trace_file] + + if trace_duration is not None: + argv += ['--duration', str(int(trace_duration.total_seconds() + * PerfettoTraceCollector.MS_PER_SEC))] + + print_utils.debug_print(argv) + return argv + +def execute_run_using_perfetto_trace(collector_info, + run_combos: Iterable[RunCommandArgs], + simulate: bool, + inodes_path: str, + timeout: int, + compiler_type: CompilerType, + requires_trace_collection: bool) -> DataFrame: + """ Executes run based on perfetto trace. """ + if requires_trace_collection: + passed, perfetto_trace_file = run_perfetto_collector(collector_info, + timeout, + simulate) if not passed: - print("[FAILED, code:%s]" %(return_code), script_output, file=sys.stderr) - - return (passed, script_output) - -def execute_run_combos(grouped_run_combos: Iterable[Tuple[CollectorPackageInfo, Iterable[RunCommandArgs]]], simulate: bool, inodes_path: str, timeout: int, loop_count: int, need_trace: bool): + raise RuntimeError('Cannot run perfetto collector!') + else: + perfetto_trace_file = tempfile.NamedTemporaryFile() + + with perfetto_trace_file: + for combos in run_combos: + if combos.readahead in _TRACING_READAHEADS: + if simulate: + compiler_trace_file = tempfile.NamedTemporaryFile() + else: + ri_compiler_argv = build_ri_compiler_argv(inodes_path, + perfetto_trace_file.name, + combos.trace_duration) + compiler_trace_file = compiler.compile(compiler_type, + inodes_path, + ri_compiler_argv, + combos.package, + combos.activity) + + with compiler_trace_file: + combos = combos._replace(input=compiler_trace_file.name) + print_utils.debug_print(combos) + output = PrefetchAppRunner(**combos._asdict()).run() + else: + print_utils.debug_print(combos) + output = PrefetchAppRunner(**combos._asdict()).run() + + yield DataFrame(dict((x, [y]) for x, y in output)) if output else None + +def execute_run_combos( + grouped_run_combos: Iterable[Tuple[CollectorPackageInfo, Iterable[RunCommandArgs]]], + simulate: bool, + inodes_path: str, + timeout: int, + compiler_type: CompilerType, + requires_trace_collection: bool): # nothing will work if the screen isn't unlocked first. 
- execute_arbitrary_command([_UNLOCK_SCREEN_SCRIPT], simulate, timeout) + cmd_utils.execute_arbitrary_command([_UNLOCK_SCREEN_SCRIPT], + timeout, + simulate=simulate, + shell=False) for collector_info, run_combos in grouped_run_combos: - #collector_args = ["--package", package_name] - collector_args = as_run_command(collector_info) - # TODO: forward --wait_time for how long systrace runs? - # TODO: forward --trace_buffer_size for size of systrace buffer size? - collector_cmd, collector_tmp_output_file = make_script_command_with_temp_output(_COLLECTOR_SCRIPT, collector_args, inodes=inodes_path) - - with collector_tmp_output_file: - collector_passed = True - if need_trace: - collector_timeout = timeout and _COLLECTOR_TIMEOUT_MULTIPLIER * timeout - (collector_passed, collector_script_output) = execute_arbitrary_command(collector_cmd, simulate, collector_timeout) - # TODO: consider to print a ; collector wrote file to <...> into the CSV file so we know it was ran. - - for combos in run_combos: - args = as_run_command(combos) - - cmd, tmp_output_file = make_script_command_with_temp_output(_RUN_SCRIPT, args, count=loop_count, input=collector_tmp_output_file.name) - with tmp_output_file: - (passed, script_output) = execute_arbitrary_command(cmd, simulate, timeout) - parsed_output = simulate and [1,2,3] or parse_run_script_csv_file(tmp_output_file) - yield (passed, script_output, parsed_output) - -def gather_results(commands: Iterable[Tuple[bool, str, List[int]]], key_list: List[str], value_list: List[Tuple[str, ...]]): - _debug_print("gather_results: key_list = ", key_list) - yield key_list + ["time(ms)"] - + yield from execute_run_using_perfetto_trace(collector_info, + run_combos, + simulate, + inodes_path, + timeout, + compiler_type, + requires_trace_collection) + +def gather_results(commands: Iterable[Tuple[DataFrame]], + key_list: List[str], value_list: List[Tuple[str, ...]]): + print_utils.debug_print("gather_results: key_list = ", key_list) stringify_none = lambda s: s is None and "<none>" or s + # yield key_list + ["time(ms)"] + for (run_result_list, values) in itertools.zip_longest(commands, value_list): + print_utils.debug_print("run_result_list = ", run_result_list) + print_utils.debug_print("values = ", values) - for ((passed, script_output, run_result_list), values) in itertools.zip_longest(commands, value_list): - if not passed: + if not run_result_list: continue - for result in run_result_list: - yield [stringify_none(i) for i in values] + [result] - yield ["; avg(%s), min(%s), max(%s), count(%s)" %(sum(run_result_list, 0.0) / len(run_result_list), min(run_result_list), max(run_result_list), len(run_result_list)) ] + # RunCommandArgs(package='com.whatever', readahead='warm', compiler_filter=None) + # -> {'package':['com.whatever'], 'readahead':['warm'], 'compiler_filter':[None]} + values_dict = {} + for k, v in values._asdict().items(): + if not k in key_list: + continue + values_dict[k] = [stringify_none(v)] + + values_df = DataFrame(values_dict) + # project 'values_df' to be same number of rows as run_result_list. + values_df = values_df.repeat(run_result_list.data_row_len) + + # the results are added as right-hand-side columns onto the existing labels for the table. + values_df.merge_data_columns(run_result_list) + + yield values_df def eval_and_save_to_csv(output, annotated_result_values): + printed_header = False + csv_writer = csv.writer(output) for row in annotated_result_values: - csv_writer.writerow(row) - output.flush() # see the output live. 
+ if not printed_header: + headers = row.headers + csv_writer.writerow(headers) + printed_header = True + # TODO: what about when headers change? + + for data_row in row.data_table: + data_row = [d for d in data_row] + csv_writer.writerow(data_row) + + output.flush() # see the output live. + +def coerce_to_list(opts: dict): + """Tranform values of the dictionary to list. + For example: + 1 -> [1], None -> [None], [1,2,3] -> [1,2,3] + [[1],[2]] -> [[1],[2]], {1:1, 2:2} -> [{1:1, 2:2}] + """ + result = {} + for key in opts: + val = opts[key] + result[key] = val if issubclass(type(val), list) else [val] + return result def main(): global _debug @@ -297,26 +357,37 @@ def main(): _debug = opts.debug if _DEBUG_FORCE is not None: _debug = _DEBUG_FORCE - _debug_print("parsed options: ", opts) - need_trace = not not set(opts.readaheads).intersection(set(_TRACING_READAHEADS)) - if need_trace and not opts.inodes: - print("Error: Missing -in/--inodes, required when using a readahead of %s" %(_TRACING_READAHEADS), file=sys.stderr) - return 1 + + print_utils.DEBUG = _debug + cmd_utils.SIMULATE = opts.simulate + + print_utils.debug_print("parsed options: ", opts) output_file = opts.output and open(opts.output, 'w') or sys.stdout - combos = lambda: generate_run_combinations(RunCommandArgs, vars(opts)) - _debug_print_gen("run combinations: ", combos()) + combos = lambda: args_utils.generate_run_combinations( + RunCommandArgs, + coerce_to_list(vars(opts)), + opts.loop_count) + print_utils.debug_print_gen("run combinations: ", combos()) + + grouped_combos = lambda: args_utils.generate_group_run_combinations(combos(), + CollectorPackageInfo) - grouped_combos = lambda: generate_group_run_combinations(combos(), CollectorPackageInfo) - _debug_print_gen("grouped run combinations: ", grouped_combos()) + print_utils.debug_print_gen("grouped run combinations: ", grouped_combos()) + requires_trace_collection = any(i in _TRACING_READAHEADS for i in opts.readaheads) + exec = execute_run_combos(grouped_combos(), + opts.simulate, + opts.inodes, + opts.timeout, + opts.compiler_type, + requires_trace_collection) - exec = execute_run_combos(grouped_combos(), opts.simulate, opts.inodes, opts.timeout, opts.loop_count, need_trace) results = gather_results(exec, _COMBINATORIAL_OPTIONS, combos()) - eval_and_save_to_csv(output_file, results) - return 0 + eval_and_save_to_csv(output_file, results) + return 1 if __name__ == '__main__': sys.exit(main()) diff --git a/startop/scripts/app_startup/app_startup_runner_test.py b/startop/scripts/app_startup/app_startup_runner_test.py index f96f802a3aef..0c2bbea04f6a 100755 --- a/startop/scripts/app_startup/app_startup_runner_test.py +++ b/startop/scripts/app_startup/app_startup_runner_test.py @@ -31,18 +31,17 @@ Usage: See also https://docs.pytest.org/en/latest/usage.html """ -# global imports -from contextlib import contextmanager import io import shlex import sys import typing - -# pip imports -import pytest +# global imports +from contextlib import contextmanager # local imports import app_startup_runner as asr +# pip imports +import pytest # # Argument Parsing Helpers @@ -91,7 +90,9 @@ def default_dict_for_parsed_args(**kwargs): """ # Combine it with all of the "optional" parameters' default values. 
""" - d = {'compiler_filters': None, 'simulate': False, 'debug': False, 'output': None, 'timeout': None, 'loop_count': 1, 'inodes': None} + d = {'compiler_filters': None, 'simulate': False, 'debug': False, + 'output': None, 'timeout': 10, 'loop_count': 1, 'inodes': None, + 'trace_duration': None, 'compiler_type': asr.CompilerType.DEVICE} d.update(kwargs) return d @@ -111,7 +112,7 @@ def parse_optional_args(str): in default_mock_dict_for_parsed_args. """ req = "--package com.fake.package --readahead warm" - return parse_args("%s %s" %(req, str)) + return parse_args("%s %s" % (req, str)) def test_argparse(): # missing arguments @@ -124,15 +125,22 @@ def test_argparse(): # required arguments are parsed correctly ad = default_dict_for_parsed_args # assert dict - assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'], readaheads=['warm']) - assert parse_args("-p xyz -r warm") == ad(packages=['xyz'], readaheads=['warm']) + assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'], + readaheads=['warm']) + assert parse_args("-p xyz -r warm") == ad(packages=['xyz'], + readaheads=['warm']) - assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'], readaheads=['warm'], simulate=True) - assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'], readaheads=['warm'], simulate=True) + assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'], + readaheads=['warm'], + simulate=True) + assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'], + readaheads=['warm'], + simulate=True) # optional arguments are parsed correctly. mad = default_mock_dict_for_parsed_args # mock assert dict - assert parse_optional_args("--output filename.csv") == mad(output='filename.csv') + assert parse_optional_args("--output filename.csv") == mad( + output='filename.csv') assert parse_optional_args("-o filename.csv") == mad(output='filename.csv') assert parse_optional_args("--timeout 123") == mad(timeout=123) @@ -145,36 +153,6 @@ def test_argparse(): assert parse_optional_args("-in baz") == mad(inodes="baz") -def generate_run_combinations(*args): - # expand out the generator values so that assert x == y works properly. 
- return [i for i in asr.generate_run_combinations(*args)] - -def test_generate_run_combinations(): - blank_nd = typing.NamedTuple('Blank') - assert generate_run_combinations(blank_nd, {}) == [()], "empty" - assert generate_run_combinations(blank_nd, {'a' : ['a1', 'a2']}) == [()], "empty filter" - a_nd = typing.NamedTuple('A', [('a', str)]) - assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None" - assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), ('a2',)], "one item" - assert generate_run_combinations(a_nd, - {'a' : ['a1', 'a2'], 'b': ['b1', 'b2']}) == [('a1',), ('a2',)],\ - "one item filter" - ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)]) - assert generate_run_combinations(ab_nd, - {'a': ['a1', 'a2'], - 'b': ['b1', 'b2']}) == [ab_nd('a1', 'b1'), - ab_nd('a1', 'b2'), - ab_nd('a2', 'b1'), - ab_nd('a2', 'b2')],\ - "two items" - - assert generate_run_combinations(ab_nd, - {'as': ['a1', 'a2'], - 'bs': ['b1', 'b2']}) == [ab_nd('a1', 'b1'), - ab_nd('a1', 'b2'), - ab_nd('a2', 'b1'), - ab_nd('a2', 'b2')],\ - "two items plural" def test_key_to_cmdline_flag(): assert asr.key_to_cmdline_flag("abc") == "--abc" @@ -182,29 +160,17 @@ def test_key_to_cmdline_flag(): assert asr.key_to_cmdline_flag("ba_r") == "--ba-r" assert asr.key_to_cmdline_flag("ba_zs") == "--ba-z" - -def test_make_script_command_with_temp_output(): - cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script", args=[], count=1) - with tmp_file: - assert cmd_str == ["fake_script", "--count", "1", "--output", tmp_file.name] - - cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script", args=['a', 'b'], count=2) - with tmp_file: - assert cmd_str == ["fake_script", "a", "b", "--count", "2", "--output", tmp_file.name] - def test_parse_run_script_csv_file(): # empty file -> empty list f = io.StringIO("") - assert asr.parse_run_script_csv_file(f) == [] + assert asr.parse_run_script_csv_file(f) == None # common case - f = io.StringIO("1,2,3") - assert asr.parse_run_script_csv_file(f) == [1,2,3] - - # ignore trailing comma - f = io.StringIO("1,2,3,4,5,") - assert asr.parse_run_script_csv_file(f) == [1,2,3,4,5] + f = io.StringIO("TotalTime_ms,Displayed_ms\n1,2") + df = asr.DataFrame({'TotalTime_ms': [1], 'Displayed_ms': [2]}) + pf = asr.parse_run_script_csv_file(f) + assert pf == df if __name__ == '__main__': pytest.main() diff --git a/startop/scripts/app_startup/launch_application b/startop/scripts/app_startup/launch_application index 8a68e5016190..6704a5a97aa0 100755 --- a/startop/scripts/app_startup/launch_application +++ b/startop/scripts/app_startup/launch_application @@ -17,6 +17,19 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "$DIR/lib/common" +launch_application_usage() { + cat <<EOF +Usage: $(basename $0) <package> <activity> + + Positional Arguments: + <package> package of the app to test + <activity> activity to use + + Named Arguments: + -h, --help usage information (this) +EOF +} + launch_application() { local package="$1" local activity="$2" @@ -26,22 +39,14 @@ launch_application() { # if the $ is not escaped, adb shell will try to evaluate $HomeActivity to a variable. activity=${activity//\$/\\$} - local am_output="$(adb shell am start -S -W "$package"/"$activity")" - verbose_print adb shell am start -S -W "$package"/"$activity" - if [[ $? -ne 0 ]]; then - echo "am start failed" >&2 + adb shell am start -S -W "$package"/"$activity" - return 1 - fi - - # for everything else use the am start "TotalTime" output. 
- verbose_print "$am_output" - local total_time="$(echo "$am_output" | grep 'TotalTime:' | sed 's/TotalTime: //g')" - verbose_print "total time: $total_time" - - # TODO: Extract alternative metrics such as the #reportFullyDrawn. - - echo "$total_time" + # pipe this into 'parse_metrics' to parse the output. } +if [[ $# -lt 2 ]]; then + launch_application_usage + exit 1 +fi + launch_application "$@" diff --git a/startop/scripts/app_startup/lib/adb_utils.py b/startop/scripts/app_startup/lib/adb_utils.py new file mode 100644 index 000000000000..3cebc9a97a50 --- /dev/null +++ b/startop/scripts/app_startup/lib/adb_utils.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helper util libraries for calling adb command line.""" + +import datetime +import os +import re +import sys +import time +from typing import Optional + +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__))))) +import lib.cmd_utils as cmd_utils +import lib.logcat_utils as logcat_utils + + +def logcat_save_timestamp() -> str: + """Gets the current logcat timestamp. + + Returns: + A string of timestamp. + """ + _, output = cmd_utils.run_adb_shell_command( + "date -u +\'%Y-%m-%d %H:%M:%S.%N\'") + return output + +def vm_drop_cache(): + """Free pagecache and slab object.""" + cmd_utils.run_adb_shell_command('echo 3 > /proc/sys/vm/drop_caches') + # Sleep a little bit to provide enough time for cache cleanup. + time.sleep(1) + +def root(): + """Roots adb and successive adb commands will run under root.""" + cmd_utils.run_shell_command('adb root') + +def disable_selinux(): + """Disables selinux setting.""" + _, output = cmd_utils.run_adb_shell_command('getenforce') + if output == 'Permissive': + return + + print('Disable selinux permissions and restart framework.') + cmd_utils.run_adb_shell_command('setenforce 0') + cmd_utils.run_adb_shell_command('stop') + cmd_utils.run_adb_shell_command('start') + cmd_utils.run_shell_command('adb wait-for-device') + +def pkill(procname: str): + """Kills a process on device specified by the substring pattern in procname""" + _, pids = cmd_utils.run_shell_command('adb shell ps | grep "{}" | ' + 'awk \'{{print $2;}}\''. + format(procname)) + + for pid in pids.split('\n'): + pid = pid.strip() + if pid: + passed,_ = cmd_utils.run_adb_shell_command('kill {}'.format(pid)) + time.sleep(1) + +def parse_time_to_milliseconds(time: str) -> int: + """Parses the time string to milliseconds.""" + # Example: +1s56ms, +56ms + regex = r'\+((?P<second>\d+?)s)?(?P<millisecond>\d+?)ms' + result = re.search(regex, time) + second = 0 + if result.group('second'): + second = int(result.group('second')) + ms = int(result.group('millisecond')) + return second * 1000 + ms + +def blocking_wait_for_logcat_displayed_time(timestamp: datetime.datetime, + package: str, + timeout: int) -> Optional[int]: + """Parses the displayed time in the logcat. + + Returns: + the displayed time. 
+ """ + pattern = re.compile('.*ActivityTaskManager: Displayed {}.*'.format(package)) + # 2019-07-02 22:28:34.469453349 -> 2019-07-02 22:28:34.469453 + timestamp = datetime.datetime.strptime(timestamp[:-3], + '%Y-%m-%d %H:%M:%S.%f') + timeout_dt = timestamp + datetime.timedelta(0, timeout) + # 2019-07-01 14:54:21.946 27365 27392 I ActivityTaskManager: + # Displayed com.android.settings/.Settings: +927ms + result = logcat_utils.blocking_wait_for_logcat_pattern(timestamp, + pattern, + timeout_dt) + if not result or not '+' in result: + return None + displayed_time = result[result.rfind('+'):] + + return parse_time_to_milliseconds(displayed_time) + +def delete_file_on_device(file_path: str) -> None: + """ Deletes a file on the device. """ + cmd_utils.run_adb_shell_command( + "[[ -f '{file_path}' ]] && rm -f '{file_path}' || " + "exit 0".format(file_path=file_path)) + +def set_prop(property: str, value: str) -> None: + """ Sets property using adb shell. """ + cmd_utils.run_adb_shell_command('setprop "{property}" "{value}"'.format( + property=property, value=value)) + +def pull_file(device_file_path: str, output_file_path: str) -> None: + """ Pulls file from device to output """ + cmd_utils.run_shell_command('adb pull "{device_file_path}" "{output_file_path}"'. + format(device_file_path=device_file_path, + output_file_path=output_file_path)) diff --git a/startop/scripts/app_startup/lib/adb_utils_test.py b/startop/scripts/app_startup/lib/adb_utils_test.py new file mode 100644 index 000000000000..e590fed568e3 --- /dev/null +++ b/startop/scripts/app_startup/lib/adb_utils_test.py @@ -0,0 +1,16 @@ +import adb_utils + +# pip imports +import pytest + +def test_parse_time_to_milliseconds(): + # Act + result1 = adb_utils.parse_time_to_milliseconds('+1s7ms') + result2 = adb_utils.parse_time_to_milliseconds('+523ms') + + # Assert + assert result1 == 1007 + assert result2 == 523 + +if __name__ == '__main__': + pytest.main() diff --git a/startop/scripts/app_startup/lib/app_runner.py b/startop/scripts/app_startup/lib/app_runner.py new file mode 100644 index 000000000000..78873fa51ab5 --- /dev/null +++ b/startop/scripts/app_startup/lib/app_runner.py @@ -0,0 +1,266 @@ +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Class to run an app.""" +import os +import sys +from typing import Optional, List, Tuple + +# local import +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__))))) + +import app_startup.lib.adb_utils as adb_utils +import lib.cmd_utils as cmd_utils +import lib.print_utils as print_utils + +class AppRunnerListener(object): + """Interface for lisenter of AppRunner. """ + + def preprocess(self) -> None: + """Preprocess callback to initialized before the app is running. """ + pass + + def postprocess(self, pre_launch_timestamp: str) -> None: + """Postprocess callback to cleanup after the app is running. + + param: + 'pre_launch_timestamp': indicates the timestamp when the app is + launching.. 
""" + pass + + def metrics_selector(self, am_start_output: str, + pre_launch_timestamp: str) -> None: + """A metrics selection callback that waits for the desired metrics to + show up in logcat. + params: + 'am_start_output': indicates the output of app startup. + 'pre_launch_timestamp': indicates the timestamp when the app is + launching. + returns: + a string in the format of "<metric>=<value>\n<metric>=<value>\n..." + for further parsing. For example "TotalTime=123\nDisplayedTime=121". + Return an empty string if no metrics need to be parsed further. + """ + pass + +class AppRunner(object): + """ Class to run an app. """ + # static variables + DIR = os.path.abspath(os.path.dirname(__file__)) + APP_STARTUP_DIR = os.path.dirname(DIR) + IORAP_COMMON_BASH_SCRIPT = os.path.realpath(os.path.join(DIR, + '../../iorap/common')) + DEFAULT_TIMEOUT = 30 # seconds + + def __init__(self, + package: str, + activity: Optional[str], + compiler_filter: Optional[str], + timeout: Optional[int], + simulate: bool): + self.package = package + self.simulate = simulate + + # If the argument activity is None, try to set it. + self.activity = activity + if self.simulate: + self.activity = 'act' + if self.activity is None: + self.activity = AppRunner.get_activity(self.package) + + self.compiler_filter = compiler_filter + self.timeout = timeout if timeout else AppRunner.DEFAULT_TIMEOUT + + self.listeners = [] + + def add_callbacks(self, listener: AppRunnerListener): + self.listeners.append(listener) + + def remove_callbacks(self, listener: AppRunnerListener): + self.listeners.remove(listener) + + @staticmethod + def get_activity(package: str) -> str: + """ Tries to set the activity based on the package. """ + passed, activity = cmd_utils.run_shell_func( + AppRunner.IORAP_COMMON_BASH_SCRIPT, + 'get_activity_name', + [package]) + + if not passed or not activity: + raise ValueError( + 'Activity name could not be found, invalid package name?!') + + return activity + + def configure_compiler_filter(self) -> bool: + """Configures compiler filter (e.g. speed). + + Returns: + A bool indicates whether configure of compiler filer succeeds or not. + """ + if not self.compiler_filter: + print_utils.debug_print('No --compiler-filter specified, don\'t' + ' need to force it.') + return True + + passed, current_compiler_filter_info = \ + cmd_utils.run_shell_command( + '{} --package {}'.format(os.path.join(AppRunner.APP_STARTUP_DIR, + 'query_compiler_filter.py'), + self.package)) + + if passed != 0: + return passed + + # TODO: call query_compiler_filter directly as a python function instead of + # these shell calls. + current_compiler_filter, current_reason, current_isa = \ + current_compiler_filter_info.split(' ') + print_utils.debug_print('Compiler Filter={} Reason={} Isa={}'.format( + current_compiler_filter, current_reason, current_isa)) + + # Don't trust reasons that aren't 'unknown' because that means + # we didn't manually force the compilation filter. + # (e.g. if any automatic system-triggered compilations are not unknown). + if current_reason != 'unknown' or \ + current_compiler_filter != self.compiler_filter: + passed, _ = adb_utils.run_shell_command('{}/force_compiler_filter ' + '--compiler-filter "{}" ' + '--package "{}"' + ' --activity "{}'. 
+ format(AppRunner.APP_STARTUP_DIR, + self.compiler_filter, + self.package, + self.activity)) + else: + adb_utils.debug_print('Queried compiler-filter matched requested ' + 'compiler-filter, skip forcing.') + passed = False + return passed + + def run(self) -> Optional[List[Tuple[str]]]: + """Runs an app. + + Returns: + A list of (metric, value) tuples. + """ + print_utils.debug_print('==========================================') + print_utils.debug_print('===== START =====') + print_utils.debug_print('==========================================') + # Run the preprocess. + for listener in self.listeners: + listener.preprocess() + + # Ensure the APK is currently compiled with whatever we passed in + # via --compiler-filter. + # No-op if this option was not passed in. + if not self.configure_compiler_filter(): + print_utils.error_print('Compiler filter configuration failed!') + return None + + pre_launch_timestamp = adb_utils.logcat_save_timestamp() + # Launch the app. + results = self.launch_app(pre_launch_timestamp) + + # Run the postprocess. + for listener in self.listeners: + listener.postprocess(pre_launch_timestamp) + + return results + + def launch_app(self, pre_launch_timestamp: str) -> Optional[List[Tuple[str]]]: + """ Launches the app. + + Returns: + A list of (metric, value) tuples. + """ + print_utils.debug_print('Running with timeout {}'.format(self.timeout)) + + passed, am_start_output = cmd_utils.run_shell_command('timeout {timeout} ' + '"{DIR}/launch_application" ' + '"{package}" ' + '"{activity}"'. + format(timeout=self.timeout, + DIR=AppRunner.APP_STARTUP_DIR, + package=self.package, + activity=self.activity)) + if not passed and not self.simulate: + return None + + return self.wait_for_app_finish(pre_launch_timestamp, am_start_output) + + def wait_for_app_finish(self, + pre_launch_timestamp: str, + am_start_output: str) -> Optional[List[Tuple[str]]]: + """ Wait for app finish and all metrics are shown in logcat. + + Returns: + A list of (metric, value) tuples. + """ + if self.simulate: + return [('TotalTime', '123')] + + ret = [] + for listener in self.listeners: + output = listener.metrics_selector(am_start_output, + pre_launch_timestamp) + ret = ret + AppRunner.parse_metrics_output(output) + + return ret + + @staticmethod + def parse_metrics_output(input: str) -> List[ + Tuple[str, str, str]]: + """Parses output of app startup to metrics and corresponding values. + + It converts 'a=b\nc=d\ne=f\n...' into '[(a,b,''),(c,d,''),(e,f,'')]' + + Returns: + A list of tuples that including metric name, metric value and rest info. + """ + all_metrics = [] + for line in input.split('\n'): + if not line: + continue + splits = line.split('=') + if len(splits) < 2: + print_utils.error_print('Bad line "{}"'.format(line)) + continue + metric_name = splits[0] + metric_value = splits[1] + rest = splits[2] if len(splits) > 2 else '' + if rest: + print_utils.error_print('Corrupt line "{}"'.format(line)) + print_utils.debug_print('metric: "{metric_name}", ' + 'value: "{metric_value}" '. + format(metric_name=metric_name, + metric_value=metric_value)) + + all_metrics.append((metric_name, metric_value)) + return all_metrics + + @staticmethod + def parse_total_time( am_start_output: str) -> Optional[str]: + """Parses the total time from 'adb shell am start pkg' output. + + Returns: + the total time of app startup. 
+ """ + for line in am_start_output.split('\n'): + if 'TotalTime:' in line: + return line[len('TotalTime:'):].strip() + return None + diff --git a/startop/scripts/app_startup/lib/app_runner_test.py b/startop/scripts/app_startup/lib/app_runner_test.py new file mode 100644 index 000000000000..33d233b03aab --- /dev/null +++ b/startop/scripts/app_startup/lib/app_runner_test.py @@ -0,0 +1,104 @@ +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Unit tests for the AppRunner.""" +import os +import sys +from pathlib import Path + +from app_runner import AppRunner, AppRunnerListener +from mock import Mock, call, patch + +# The path is "frameworks/base/startop/scripts/" +sys.path.append(Path(os.path.realpath(__file__)).parents[2]) +import lib.cmd_utils as cmd_utils + +class AppRunnerTestListener(AppRunnerListener): + def preprocess(self) -> None: + cmd_utils.run_shell_command('pre'), + + def postprocess(self, pre_launch_timestamp: str) -> None: + cmd_utils.run_shell_command('post'), + + def metrics_selector(self, am_start_output: str, + pre_launch_timestamp: str) -> None: + return 'TotalTime=123\n' + +RUNNER = AppRunner(package='music', + activity='MainActivity', + compiler_filter='speed', + timeout=None, + simulate=False) + + + +def test_configure_compiler_filter(): + with patch('lib.cmd_utils.run_shell_command', + new_callable=Mock) as mock_run_shell_command: + mock_run_shell_command.return_value = (True, 'speed arm64 kUpToDate') + + RUNNER.configure_compiler_filter() + + calls = [call(os.path.realpath( + os.path.join(RUNNER.DIR, + '../query_compiler_filter.py')) + ' --package music')] + mock_run_shell_command.assert_has_calls(calls) + +def test_parse_metrics_output(): + input = 'a1=b1\nc1=d1\ne1=f1' + ret = RUNNER.parse_metrics_output(input) + + assert ret == [('a1', 'b1'), ('c1', 'd1'), ('e1', 'f1')] + +def _mocked_run_shell_command(*args, **kwargs): + if args[0] == 'adb shell "date -u +\'%Y-%m-%d %H:%M:%S.%N\'"': + return (True, "2019-07-02 23:20:06.972674825") + elif args[0] == 'adb shell ps | grep "music" | awk \'{print $2;}\'': + return (True, '9999') + else: + return (True, 'a1=b1\nc1=d1=d2\ne1=f1') + +@patch('app_startup.lib.adb_utils.blocking_wait_for_logcat_displayed_time') +@patch('lib.cmd_utils.run_shell_command') +def test_run(mock_run_shell_command, + mock_blocking_wait_for_logcat_displayed_time): + mock_run_shell_command.side_effect = _mocked_run_shell_command + mock_blocking_wait_for_logcat_displayed_time.return_value = 123 + + test_listener = AppRunnerTestListener() + RUNNER.add_callbacks(test_listener) + + result = RUNNER.run() + + RUNNER.remove_callbacks(test_listener) + + calls = [call('pre'), + call(os.path.realpath( + os.path.join(RUNNER.DIR, + '../query_compiler_filter.py')) + + ' --package music'), + call('adb shell "date -u +\'%Y-%m-%d %H:%M:%S.%N\'"'), + call( + 'timeout {timeout} "{DIR}/launch_application" "{package}" "{activity}"' + .format(timeout=30, + DIR=os.path.realpath(os.path.dirname(RUNNER.DIR)), + 
package='music', + activity='MainActivity', + timestamp='2019-07-02 23:20:06.972674825')), + call('post') + ] + mock_run_shell_command.assert_has_calls(calls) + assert result == [('TotalTime', '123')] + assert len(RUNNER.listeners) == 0
\ No newline at end of file diff --git a/startop/scripts/app_startup/lib/args_utils.py b/startop/scripts/app_startup/lib/args_utils.py new file mode 100644 index 000000000000..080f3b53157b --- /dev/null +++ b/startop/scripts/app_startup/lib/args_utils.py @@ -0,0 +1,77 @@ +import itertools +import os +import sys +from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Tuple, \ + TypeVar, Optional + +# local import +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__))))) +import lib.print_utils as print_utils + +T = TypeVar('T') +NamedTupleMeta = Callable[ + ..., T] # approximation of a (S : NamedTuple<T> where S() == T) metatype. +FilterFuncType = Callable[[NamedTuple], bool] + +def dict_lookup_any_key(dictionary: dict, *keys: List[Any]): + for k in keys: + if k in dictionary: + return dictionary[k] + + + print_utils.debug_print("None of the keys {} were in the dictionary".format( + keys)) + return [None] + +def generate_run_combinations(named_tuple: NamedTupleMeta[T], + opts_dict: Dict[str, List[Optional[object]]], + loop_count: int = 1) -> Iterable[T]: + """ + Create all possible combinations given the values in opts_dict[named_tuple._fields]. + + :type T: type annotation for the named_tuple type. + :param named_tuple: named tuple type, whose fields are used to make combinations for + :param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields. + :param loop_count: number of repetitions. + :return: an iterable over named_tuple instances. + """ + combinations_list = [] + for k in named_tuple._fields: + # the key can be either singular or plural , e.g. 'package' or 'packages' + val = dict_lookup_any_key(opts_dict, k, k + "s") + + # treat {'x': None} key value pairs as if it was [None] + # otherwise itertools.product throws an exception about not being able to iterate None. + combinations_list.append(val or [None]) + + print_utils.debug_print("opts_dict: ", opts_dict) + print_utils.debug_print_nd("named_tuple: ", named_tuple) + print_utils.debug_print("combinations_list: ", combinations_list) + + for i in range(loop_count): + for combo in itertools.product(*combinations_list): + yield named_tuple(*combo) + +def filter_run_combinations(named_tuple: NamedTuple, + filters: List[FilterFuncType]) -> bool: + for filter in filters: + if filter(named_tuple): + return False + return True + +def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], + dst_nt: NamedTupleMeta[T]) \ + -> Iterable[Tuple[T, Iterable[NamedTuple]]]: + def group_by_keys(src_nt): + src_d = src_nt._asdict() + # now remove the keys that aren't legal in dst. + for illegal_key in set(src_d.keys()) - set(dst_nt._fields): + if illegal_key in src_d: + del src_d[illegal_key] + + return dst_nt(**src_d) + + for args_list_it in itertools.groupby(run_combinations, group_by_keys): + (group_key_value, args_it) = args_list_it + yield (group_key_value, args_it) diff --git a/startop/scripts/app_startup/lib/args_utils_test.py b/startop/scripts/app_startup/lib/args_utils_test.py new file mode 100644 index 000000000000..4b7e0fa20627 --- /dev/null +++ b/startop/scripts/app_startup/lib/args_utils_test.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# +# Copyright 2018, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Unit tests for the args_utils.py script.""" + +import typing + +import args_utils + +def generate_run_combinations(*args): + # expand out the generator values so that assert x == y works properly. + return [i for i in args_utils.generate_run_combinations(*args)] + +def test_generate_run_combinations(): + blank_nd = typing.NamedTuple('Blank') + assert generate_run_combinations(blank_nd, {}, 1) == [()], "empty" + assert generate_run_combinations(blank_nd, {'a': ['a1', 'a2']}) == [ + ()], "empty filter" + a_nd = typing.NamedTuple('A', [('a', str)]) + assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None" + assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), ( + 'a2',)], "one item" + assert generate_run_combinations(a_nd, + {'a': ['a1', 'a2'], 'b': ['b1', 'b2']}) == [ + ('a1',), ('a2',)], \ + "one item filter" + assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}, 2) == [('a1',), ( + 'a2',), ('a1',), ('a2',)], "one item" + ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)]) + assert generate_run_combinations(ab_nd, + {'a': ['a1', 'a2'], + 'b': ['b1', 'b2']}) == [ab_nd('a1', 'b1'), + ab_nd('a1', 'b2'), + ab_nd('a2', 'b1'), + ab_nd('a2', 'b2')], \ + "two items" + + assert generate_run_combinations(ab_nd, + {'as': ['a1', 'a2'], + 'bs': ['b1', 'b2']}) == [ab_nd('a1', 'b1'), + ab_nd('a1', 'b2'), + ab_nd('a2', 'b1'), + ab_nd('a2', 'b2')], \ + "two items plural" diff --git a/startop/scripts/app_startup/lib/common b/startop/scripts/app_startup/lib/common index 043d8550b64b..bedaa1e10288 100755 --- a/startop/scripts/app_startup/lib/common +++ b/startop/scripts/app_startup/lib/common @@ -1,4 +1,17 @@ #!/bin/bash +# Copyright 2018, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. if [[ -z $ANDROID_BUILD_TOP ]]; then echo "Please run source build/envsetup.sh first" >&2 @@ -49,5 +62,137 @@ get_activity_name() { local activity_line="$(adb shell cmd package query-activities --brief -a android.intent.action.MAIN -c android.intent.category.LAUNCHER | grep "$package/")" IFS="/" read -a array <<< "$activity_line" local activity_name="${array[1]}" + + # Activities starting with '.' are shorthand for having their package name prefixed. + if [[ $activity_name == .* ]]; then + activity_name="${package}${activity_name}" + fi echo "$activity_name" } + +# Use with logcat_from_timestamp to skip all past log-lines. +logcat_save_timestamp() { + adb shell 'date -u +"%Y-%m-%d %H:%M:%S.%N"' +} + +# Roll forward logcat to only show events +# since the specified timestamp. +# +# i.e. 
don't look at historical logcat, +# only look at FUTURE logcat. +# +# First use 'logcat_save_timestamp' +# Then do whatever action you want. +# Then use 'logcat_from_timestamp_bg $timestamp' +logcat_from_timestamp_bg() { + local timestamp="$1" + shift # drop timestamp from args. + verbose_print adb logcat -T \"$timestamp\" \"$@\" + adb logcat -v UTC -T "$timestamp" "$@" & + logcat_from_timestamp_pid=$! +} + +# Starting at timestamp $2, wait until we seen pattern $3 +# or until a timeout happens in $1 seconds. +# If successful, also echo the line that matched the pattern. +# +# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse. +logcat_select_pattern() { + local timeout="$1" + local timestamp="$2" + local pattern="$3" + + local logcat_fd + + coproc logcat_fd { + kill_children_quietly() { + kill "$logcat_pidd" + wait "$logcat_pidd" 2>/dev/null + } + + trap 'kill_children_quietly' EXIT # kill logcat when this coproc is killed. + + # run logcat in the background so it can be killed. + logcat_from_timestamp_bg "$timestamp" + logcat_pidd=$logcat_from_timestamp_pid + wait "$logcat_pidd" + } + local logcat_pid="$!" + verbose_print "[LOGCAT] Spawn pid $logcat_pid" + + local timeout_ts="$(date -d "now + ${timeout} seconds" '+%s')" + local now_ts="0" + + local return_code=1 + + verbose_print "logcat_wait_for_pattern begin" + + while read -t "$timeout" -r -u "${logcat_fd[0]}" logcat_output; do + if (( $VERBOSE_LOGCAT )); then + verbose_print "LOGCAT: $logcat_output" + fi + if [[ "$logcat_output:" == *"$pattern"* ]]; then + verbose_print "LOGCAT: " "$logcat_output" + verbose_print "WE DID SEE PATTERN" '<<' "$pattern" '>>.' + echo "$logcat_output" + return_code=0 + break + fi + now_ts="$(date -d "now" '+%s')" + if (( now_ts >= timeout_ts )); then + verbose_print "DID TIMEOUT BEFORE SEEING ANYTHING (timeout=$timeout seconds) " '<<' "$pattern" '>>.' + break + fi + done + + # Don't leave logcat lying around since it will keep going. + kill "$logcat_pid" + # Suppress annoying 'Terminated...' message. + wait "$logcat_pid" 2>/dev/null + + verbose_print "[LOGCAT] $logcat_pid should be killed" + + return $return_code +} + +# Starting at timestamp $2, wait until we seen pattern $3 +# or until a timeout happens in $1 seconds. +# +# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse. +logcat_wait_for_pattern() { + logcat_select_pattern "$@" > /dev/null +} + +# Starting at timestamp $2, wait until we seen pattern $3 +# or until a timeout happens in $1 seconds. +# If successful, extract with the regular expression pattern in #4 +# and return the first capture group. +# +# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse. +logcat_extract_pattern() { + local timeout="$1" + local timestamp="$2" + local pattern="$3" + local re_pattern="$4" + + local result + local exit_code + + result="$(logcat_select_pattern "$@")" + exit_code=$? 
+ + if [[ $exit_code -ne 0 ]]; then + return $exit_code + fi + + echo "$result" | sed 's/'"$re_pattern"'/\1/g' +} + +# Join array +# FOO=(a b c) +# join_by , "${FOO[@]}" #a,b,c +join_by() { + local IFS="$1" + shift + echo "$*" +} diff --git a/startop/scripts/app_startup/lib/data_frame.py b/startop/scripts/app_startup/lib/data_frame.py new file mode 100644 index 000000000000..20a2308637f2 --- /dev/null +++ b/startop/scripts/app_startup/lib/data_frame.py @@ -0,0 +1,201 @@ +import itertools +from typing import Dict, List + +class DataFrame: + """Table-like class for storing a 2D cells table with named columns.""" + def __init__(self, data: Dict[str, List[object]] = {}): + """ + Create a new DataFrame from a dictionary (keys = headers, + values = columns). + """ + self._headers = [i for i in data.keys()] + self._rows = [] + + row_num = 0 + + def get_data_row(idx): + r = {} + for header, header_data in data.items(): + + if not len(header_data) > idx: + continue + + r[header] = header_data[idx] + + return r + + while True: + row_dict = get_data_row(row_num) + if len(row_dict) == 0: + break + + self._append_row(row_dict.keys(), row_dict.values()) + row_num = row_num + 1 + + def concat_rows(self, other: 'DataFrame') -> None: + """ + In-place concatenate rows of other into the rows of the + current DataFrame. + + None is added in pre-existing cells if new headers + are introduced. + """ + other_datas = other._data_only() + + other_headers = other.headers + + for d in other_datas: + self._append_row(other_headers, d) + + def _append_row(self, headers: List[str], data: List[object]): + new_row = {k:v for k,v in zip(headers, data)} + self._rows.append(new_row) + + for header in headers: + if not header in self._headers: + self._headers.append(header) + + def __repr__(self): +# return repr(self._rows) + repr = "" + + header_list = self._headers_only() + + row_format = u"" + for header in header_list: + row_format = row_format + u"{:>%d}" %(len(header) + 1) + + repr = row_format.format(*header_list) + "\n" + + for v in self._data_only(): + repr = repr + row_format.format(*v) + "\n" + + return repr + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.headers == other.headers and self.data_table == other.data_table + else: + print("wrong instance", other.__class__) + return False + + @property + def headers(self) -> List[str]: + return [i for i in self._headers_only()] + + @property + def data_table(self) -> List[List[object]]: + return list(self._data_only()) + + @property + def data_table_transposed(self) -> List[List[object]]: + return list(self._transposed_data()) + + @property + def data_row_len(self) -> int: + return len(self._rows) + + def data_row_at(self, idx) -> List[object]: + """ + Return a single data row at the specified index (0th based). + + Accepts negative indices, e.g. -1 is last row. + """ + row_dict = self._rows[idx] + l = [] + + for h in self._headers_only(): + l.append(row_dict.get(h)) # Adds None in blank spots. + + return l + + def copy(self) -> 'DataFrame': + """ + Shallow copy of this DataFrame. + """ + return self.repeat(count=0) + + def repeat(self, count: int) -> 'DataFrame': + """ + Returns a new DataFrame where each row of this dataframe is repeated count times. + A repeat of a row is adjacent to other repeats of that same row. 
+ """ + df = DataFrame() + df._headers = self._headers.copy() + + rows = [] + for row in self._rows: + for i in range(count): + rows.append(row.copy()) + + df._rows = rows + + return df + + def merge_data_columns(self, other: 'DataFrame'): + """ + Merge self and another DataFrame by adding the data from other column-wise. + For any headers that are the same, data from 'other' is preferred. + """ + for h in other._headers: + if not h in self._headers: + self._headers.append(h) + + append_rows = [] + + for self_dict, other_dict in itertools.zip_longest(self._rows, other._rows): + if not self_dict: + d = {} + append_rows.append(d) + else: + d = self_dict + + d_other = other_dict + if d_other: + for k,v in d_other.items(): + d[k] = v + + for r in append_rows: + self._rows.append(r) + + def data_row_reduce(self, fnc) -> 'DataFrame': + """ + Reduces the data row-wise by applying the fnc to each row (column-wise). + Empty cells are skipped. + + fnc(Iterable[object]) -> object + fnc is applied over every non-empty cell in that column (descending row-wise). + + Example: + DataFrame({'a':[1,2,3]}).data_row_reduce(sum) == DataFrame({'a':[6]}) + + Returns a new single-row DataFrame. + """ + df = DataFrame() + df._headers = self._headers.copy() + + def yield_by_column(header_key): + for row_dict in self._rows: + val = row_dict.get(header_key) + if val: + yield val + + new_row_dict = {} + for h in df._headers: + cell_value = fnc(yield_by_column(h)) + new_row_dict[h] = cell_value + + df._rows = [new_row_dict] + return df + + def _headers_only(self): + return self._headers + + def _data_only(self): + row_len = len(self._rows) + + for i in range(row_len): + yield self.data_row_at(i) + + def _transposed_data(self): + return zip(*self._data_only())
\ No newline at end of file diff --git a/startop/scripts/app_startup/lib/data_frame_test.py b/startop/scripts/app_startup/lib/data_frame_test.py new file mode 100644 index 000000000000..1cbc1cbe45cb --- /dev/null +++ b/startop/scripts/app_startup/lib/data_frame_test.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# +# Copyright 2018, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Unit tests for the data_frame.py script.""" + +from data_frame import DataFrame + +def test_data_frame(): + # trivial empty data frame + df = DataFrame() + assert df.headers == [] + assert df.data_table == [] + assert df.data_table_transposed == [] + + # common case, same number of values in each place. + df = DataFrame({'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [4, 5, 6]}) + assert df.headers == ['TotalTime_ms', 'Displayed_ms'] + assert df.data_table == [[1, 4], [2, 5], [3, 6]] + assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)] + + # varying num values. + df = DataFrame({'many': [1, 2], 'none': []}) + assert df.headers == ['many', 'none'] + assert df.data_table == [[1, None], [2, None]] + assert df.data_table_transposed == [(1, 2), (None, None)] + + df = DataFrame({'many': [], 'none': [1, 2]}) + assert df.headers == ['many', 'none'] + assert df.data_table == [[None, 1], [None, 2]] + assert df.data_table_transposed == [(None, None), (1, 2)] + + # merge multiple data frames + df = DataFrame() + df.concat_rows(DataFrame()) + assert df.headers == [] + assert df.data_table == [] + assert df.data_table_transposed == [] + + df = DataFrame() + df2 = DataFrame({'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [4, 5, 6]}) + + df.concat_rows(df2) + assert df.headers == ['TotalTime_ms', 'Displayed_ms'] + assert df.data_table == [[1, 4], [2, 5], [3, 6]] + assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)] + + df = DataFrame({'TotalTime_ms': [1, 2]}) + df2 = DataFrame({'Displayed_ms': [4, 5]}) + + df.concat_rows(df2) + assert df.headers == ['TotalTime_ms', 'Displayed_ms'] + assert df.data_table == [[1, None], [2, None], [None, 4], [None, 5]] + + df = DataFrame({'TotalTime_ms': [1, 2]}) + df2 = DataFrame({'TotalTime_ms': [3, 4], 'Displayed_ms': [5, 6]}) + + df.concat_rows(df2) + assert df.headers == ['TotalTime_ms', 'Displayed_ms'] + assert df.data_table == [[1, None], [2, None], [3, 5], [4, 6]] + + # data_row_at + df = DataFrame({'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [4, 5, 6]}) + assert df.data_row_at(-1) == [3, 6] + assert df.data_row_at(2) == [3, 6] + assert df.data_row_at(1) == [2, 5] + + # repeat + df = DataFrame({'TotalTime_ms': [1], 'Displayed_ms': [4]}) + df2 = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]}) + assert df.repeat(3) == df2 + + # repeat + df = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]}) + assert df.data_row_len == 3 + df = DataFrame({'TotalTime_ms': [1, 1]}) + assert df.data_row_len == 2 + + # repeat + df = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]}) + assert df.data_row_len == 3 + df = 
DataFrame({'TotalTime_ms': [1, 1]}) + assert df.data_row_len == 2 + + # data_row_reduce + df = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]}) + df_sum = DataFrame({'TotalTime_ms': [3], 'Displayed_ms': [12]}) + assert df.data_row_reduce(sum) == df_sum + + # merge_data_columns + df = DataFrame({'TotalTime_ms': [1, 2, 3]}) + df2 = DataFrame({'Displayed_ms': [3, 4, 5, 6]}) + + df.merge_data_columns(df2) + assert df == DataFrame( + {'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [3, 4, 5, 6]}) + + df = DataFrame({'TotalTime_ms': [1, 2, 3]}) + df2 = DataFrame({'Displayed_ms': [3, 4]}) + + df.merge_data_columns(df2) + assert df == DataFrame( + {'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [3, 4]}) + + df = DataFrame({'TotalTime_ms': [1, 2, 3]}) + df2 = DataFrame({'TotalTime_ms': [10, 11]}) + + df.merge_data_columns(df2) + assert df == DataFrame({'TotalTime_ms': [10, 11, 3]}) + + df = DataFrame({'TotalTime_ms': []}) + df2 = DataFrame({'TotalTime_ms': [10, 11]}) + + df.merge_data_columns(df2) + assert df == DataFrame({'TotalTime_ms': [10, 11]}) diff --git a/startop/scripts/app_startup/lib/perfetto_trace_collector.py b/startop/scripts/app_startup/lib/perfetto_trace_collector.py new file mode 100644 index 000000000000..9ffb3494da49 --- /dev/null +++ b/startop/scripts/app_startup/lib/perfetto_trace_collector.py @@ -0,0 +1,166 @@ +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Class to collector perfetto trace.""" +import datetime +import os +import re +import sys +import time +from datetime import timedelta +from typing import Optional, List, Tuple + +# global variables +DIR = os.path.abspath(os.path.dirname(__file__)) + +sys.path.append(os.path.dirname(os.path.dirname(DIR))) + +import app_startup.lib.adb_utils as adb_utils +from app_startup.lib.app_runner import AppRunner, AppRunnerListener +import lib.print_utils as print_utils +import lib.logcat_utils as logcat_utils +import iorap.lib.iorapd_utils as iorapd_utils + +class PerfettoTraceCollector(AppRunnerListener): + """ Class to collect perfetto trace. + + To set trace duration of perfetto, change the 'trace_duration_ms'. + To pull the generated perfetto trace on device, set the 'output'. + """ + TRACE_FILE_SUFFIX = 'perfetto_trace.pb' + TRACE_DURATION_PROP = 'iorapd.perfetto.trace_duration_ms' + MS_PER_SEC = 1000 + DEFAULT_TRACE_DURATION = timedelta(milliseconds=5000) # 5 seconds + _COLLECTOR_TIMEOUT_MULTIPLIER = 10 # take the regular timeout and multiply + + def __init__(self, + package: str, + activity: Optional[str], + compiler_filter: Optional[str], + timeout: Optional[int], + simulate: bool, + trace_duration: timedelta = DEFAULT_TRACE_DURATION, + save_destination_file_path: Optional[str] = None): + """ Initialize the perfetto trace collector. 
""" + self.app_runner = AppRunner(package, + activity, + compiler_filter, + timeout, + simulate) + self.app_runner.add_callbacks(self) + + self.trace_duration = trace_duration + self.save_destination_file_path = save_destination_file_path + + def purge_file(self, suffix: str) -> None: + print_utils.debug_print('iorapd-perfetto: purge file in ' + + self._get_remote_path()) + adb_utils.delete_file_on_device(self._get_remote_path()) + + def run(self) -> Optional[List[Tuple[str]]]: + """Runs an app. + + Returns: + A list of (metric, value) tuples. + """ + return self.app_runner.run() + + def preprocess(self): + # Sets up adb environment. + adb_utils.root() + adb_utils.disable_selinux() + time.sleep(1) + + # Kill any existing process of this app + adb_utils.pkill(self.app_runner.package) + + # Remove existing trace and compiler files + self.purge_file(PerfettoTraceCollector.TRACE_FILE_SUFFIX) + + # Set perfetto trace duration prop to milliseconds. + adb_utils.set_prop(PerfettoTraceCollector.TRACE_DURATION_PROP, + int(self.trace_duration.total_seconds()* + PerfettoTraceCollector.MS_PER_SEC)) + + if not iorapd_utils.stop_iorapd(): + raise RuntimeError('Cannot stop iorapd!') + + if not iorapd_utils.enable_iorapd_perfetto(): + raise RuntimeError('Cannot enable perfetto!') + + if not iorapd_utils.disable_iorapd_readahead(): + raise RuntimeError('Cannot disable readahead!') + + if not iorapd_utils.start_iorapd(): + raise RuntimeError('Cannot start iorapd!') + + # Drop all caches to get cold starts. + adb_utils.vm_drop_cache() + + def postprocess(self, pre_launch_timestamp: str): + # Kill any existing process of this app + adb_utils.pkill(self.app_runner.package) + + iorapd_utils.disable_iorapd_perfetto() + + if self.save_destination_file_path is not None: + adb_utils.pull_file(self._get_remote_path(), + self.save_destination_file_path) + + def metrics_selector(self, am_start_output: str, + pre_launch_timestamp: str) -> str: + """Parses the metric after app startup by reading from logcat in a blocking + manner until all metrics have been found". + + Returns: + An empty string because the metric needs no further parsing. + """ + if not self._wait_for_perfetto_trace(pre_launch_timestamp): + raise RuntimeError('Could not save perfetto app trace file!') + + return '' + + def _wait_for_perfetto_trace(self, pre_launch_timestamp) -> Optional[str]: + """ Waits for the perfetto trace being saved to file. + + The string is in the format of r".*Perfetto TraceBuffer saved to file: + <file path>.*" + + Returns: + the string what the program waits for. If the string doesn't show up, + return None. + """ + pattern = re.compile(r'.*Perfetto TraceBuffer saved to file: {}.*'. + format(self._get_remote_path())) + + # The pre_launch_timestamp is longer than what the datetime can parse. Trim + # last three digits to make them align. For example: + # 2019-07-02 23:20:06.972674825999 -> 2019-07-02 23:20:06.972674825 + assert len(pre_launch_timestamp) == len('2019-07-02 23:20:06.972674825') + timestamp = datetime.datetime.strptime(pre_launch_timestamp[:-3], + '%Y-%m-%d %H:%M:%S.%f') + + # The timeout of perfetto trace is longer than the normal app run timeout. 
+ timeout_dt = self.app_runner.timeout * PerfettoTraceCollector._COLLECTOR_TIMEOUT_MULTIPLIER + timeout_end = timestamp + datetime.timedelta(seconds=timeout_dt) + + return logcat_utils.blocking_wait_for_logcat_pattern(timestamp, + pattern, + timeout_end) + + def _get_remote_path(self): + # For example: android.music%2Fmusic.TopLevelActivity.perfetto_trace.pb + return iorapd_utils._iorapd_path_to_data_file(self.app_runner.package, + self.app_runner.activity, + PerfettoTraceCollector.TRACE_FILE_SUFFIX) diff --git a/startop/scripts/app_startup/lib/perfetto_trace_collector_test.py b/startop/scripts/app_startup/lib/perfetto_trace_collector_test.py new file mode 100644 index 000000000000..8d94fc58bede --- /dev/null +++ b/startop/scripts/app_startup/lib/perfetto_trace_collector_test.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Unit tests for the data_frame.py script.""" +import os +import sys +from pathlib import Path +from datetime import timedelta + +from mock import call, patch +from perfetto_trace_collector import PerfettoTraceCollector + +sys.path.append(Path(os.path.realpath(__file__)).parents[2]) +from app_startup.lib.app_runner import AppRunner + +RUNNER = PerfettoTraceCollector(package='music', + activity='MainActivity', + compiler_filter=None, + timeout=10, + simulate=False, + trace_duration = timedelta(milliseconds=1000), + # No actual file will be created. Just to + # check the command. + save_destination_file_path='/tmp/trace.pb') + +def _mocked_run_shell_command(*args, **kwargs): + if args[0] == 'adb shell ps | grep "music" | awk \'{print $2;}\'': + return (True, '9999') + else: + return (True, '') + +@patch('lib.logcat_utils.blocking_wait_for_logcat_pattern') +@patch('lib.cmd_utils.run_shell_command') +def test_perfetto_trace_collector_preprocess(mock_run_shell_command, + mock_blocking_wait_for_logcat_pattern): + mock_run_shell_command.side_effect = _mocked_run_shell_command + mock_blocking_wait_for_logcat_pattern.return_value = "Succeed!" 
+ + RUNNER.preprocess() + + calls = [call('adb root'), + call('adb shell "getenforce"'), + call('adb shell "setenforce 0"'), + call('adb shell "stop"'), + call('adb shell "start"'), + call('adb wait-for-device'), + call('adb shell ps | grep "music" | awk \'{print $2;}\''), + call('adb shell "kill 9999"'), + call( + 'adb shell "[[ -f \'/data/misc/iorapd/music%2FMainActivity.perfetto_trace.pb\' ]] ' + '&& rm -f \'/data/misc/iorapd/music%2FMainActivity.perfetto_trace.pb\' || exit 0"'), + call('adb shell "setprop "iorapd.perfetto.trace_duration_ms" "1000""'), + call( + 'bash -c "source {}; iorapd_stop"'.format( + AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call( + 'bash -c "source {}; iorapd_perfetto_enable"'.format( + AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call( + 'bash -c "source {}; iorapd_readahead_disable"'.format( + AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call( + 'bash -c "source {}; iorapd_start"'.format( + AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call('adb shell "echo 3 > /proc/sys/vm/drop_caches"')] + + mock_run_shell_command.assert_has_calls(calls) + +@patch('lib.logcat_utils.blocking_wait_for_logcat_pattern') +@patch('lib.cmd_utils.run_shell_command') +def test_perfetto_trace_collector_postprocess(mock_run_shell_command, + mock_blocking_wait_for_logcat_pattern): + mock_run_shell_command.side_effect = _mocked_run_shell_command + mock_blocking_wait_for_logcat_pattern.return_value = "Succeed!" + + RUNNER.postprocess('2019-07-02 23:20:06.972674825') + + calls = [call('adb shell ps | grep "music" | awk \'{print $2;}\''), + call('adb shell "kill 9999"'), + call( + 'bash -c "source {}; iorapd_perfetto_disable"'.format( + AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call('adb pull ' + '"/data/misc/iorapd/music%2FMainActivity.perfetto_trace.pb" ' + '"/tmp/trace.pb"')] + + mock_run_shell_command.assert_has_calls(calls) diff --git a/startop/scripts/app_startup/parse_metrics b/startop/scripts/app_startup/parse_metrics new file mode 100755 index 000000000000..3fa1462bc56e --- /dev/null +++ b/startop/scripts/app_startup/parse_metrics @@ -0,0 +1,215 @@ +#!/bin/bash +# +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +usage() { + cat <<EOF +Usage: launch_application package activity | parse_metrics --package <name> --timestamp <timestamp> [OPTIONS]... + + Reads from stdin the result of 'am start' metrics. May also parse logcat + for additional metrics. + + Output form: + + MetricName_unit=numeric_value + MetricName2_unit=numeric_value2 + + This may block until all desired metrics are parsed from logcat. + To get a list of metrics without doing real parsing, use --simulate. + + To add package-specific metrics, add a script called 'metrics/\$full_package_name' + that exposes additional metrics in same way as above. + + (required) + -p, --package <name> package of the app that is being used + -ts, --timestamp <name> logcat timestamp [only looks at logcat entries after this timestamp]. 
+ + (optional) + -s, --simulate prints dummy values instead of real metrics + -a, --activity <name> activity to use (default: inferred) + -h, --help usage information (this) + -v, --verbose enable extra verbose printing + -t, --timeout <sec> how many seconds to timeout when trying to wait for logcat to change + -rfd, --reportfullydrawn wait for report fully drawn (default: off) +EOF +} + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "$DIR/lib/common" + +report_fully_drawn="n" +package="" +activity="" +timeout=5 +simulate="n" +parse_arguments() { + while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + -p|--package) + package="$2" + shift + ;; + -a|--activity) + activity="$2" + shift + ;; + -v|--verbose) + export verbose="y" + ;; + -t|--timeout) + timeout="$2" + shift + ;; + -ts|--timestamp) + timestamp="$2" + shift + ;; + -s|--simulate) + simulate="y" + ;; + -rfd|--reportfullydrawn) + report_fully_drawn="y" + ;; + + + *) + echo "Invalid argument: $1" >&2 + exit 1 + esac + shift + done +} + +# Main entry point +if [[ $# -eq 0 ]]; then + usage + exit 1 +else + parse_arguments "$@" + + # if we do not have have package exit early with an error + [[ "$package" == "" ]] && echo "--package not specified" 1>&2 && exit 64 + + # ignore timestamp for --simulate. it's optional. + if [[ $simulate == y ]]; then + timestamp=0 + fi + + # if we do not have timestamp, exit early with an error + [[ "$timestamp" == "" ]] && echo "--timestamp not specified" 1>&2 && exit 64 + + if [[ "$activity" == "" ]] && [[ "$simulate" != "y" ]]; then + activity="$(get_activity_name "$package")" + if [[ "$activity" == "" ]]; then + echo "Activity name could not be found, invalid package name?" 1>&2 + exit 64 + else + verbose_print "Activity name inferred: " "$activity" + fi + fi +fi + +parse_metric_from_logcat() { + local metric_name="$1" + local pattern="$2" + local re_pattern="$3" + local retcode + local result + local sec + local ms + + # parse logcat for 'Displayed...' and that other one... + + # 05-06 14:34:08.854 29460 29481 I ActivityTaskManager: Displayed com.google.android.dialer/.extensions.GoogleDialtactsActivity: +361ms + verbose_print "parse_metric_from_logcat: $re_pattern" + + + echo -ne "$metric_name=" + + if [[ $simulate == y ]]; then + echo "-1" + return 0 + fi + + result="$(logcat_extract_pattern "$timeout" "$timestamp" "$pattern" "$re_pattern")" + retcode=$? + + if [[ $retcode -ne 0 ]]; then + # Timed out before finding the pattern. Could also mean the pattern is wrong. + echo "Parse $re_pattern from logcat TIMED OUT after $timeout seconds." >&2 + echo "-$?" + return $retcode + fi + + # "10s123ms" -> "10s123" + result=${result/ms/} + if [[ $result =~ s ]]; then + ms=${result/*s/} + sec=${result/s*/} + else + sec=0 + ms=$result + fi + ((result=sec*1000+ms)) + + echo "$result" + return $retcode +} + + +total_time="-1" +if [[ $simulate != y ]]; then + verbose_print 'logcat timestamp NOW: ' $(logcat_save_timestamp) + + # parse stdin for 'am start' result + while read -t "$timeout" -r input_line; do + verbose_print 'stdin:' "$input_line" + if [[ $input_line == *TotalTime:* ]]; then + total_time="$(echo "$input_line" | sed 's/TotalTime: \([[:digit:]]\+\)/\1/g')" + # but keep reading the rest from stdin until <EOF> + fi + done +fi + +echo "TotalTime_ms=$total_time" + +# parse logcat for 'Displayed...' and that other one... 
+ +# 05-06 14:34:08.854 29460 29481 I ActivityTaskManager: Displayed com.google.android.dialer/.extensions.GoogleDialtactsActivity: +361ms +pattern="ActivityTaskManager: Displayed ${package}" +re_pattern='.*Displayed[[:blank:]]\+'"${package}"'[/][^[:blank:]]\+[[:blank:]]+\([[:digit:]]\+ms\|[[:digit:]]\+s[[:digit:]]\+ms\).*' + +parse_metric_from_logcat "Displayed_ms" "$pattern" "$re_pattern" + +# Only track ReportFullyDrawn with --reportfullydrawn/-rfd flags +if [[ $report_fully_drawn == y ]]; then + # 01-16 17:31:44.550 11172 11204 I ActivityTaskManager: Fully drawn com.google.android.GoogleCamera/com.android.camera.CameraLauncher: +10s897ms + pattern="ActivityTaskManager: Fully drawn ${package}" + #re_pattern='.*Fully drawn[[:blank:]]\+'"${package}"'[/][^[:blank:]]\+[[:blank:]]+\([[:digit:]]\+\).*' + re_pattern='.*Fully drawn[[:blank:]]\+'"${package}"'[/][^[:blank:]]\+[[:blank:]]+\([[:digit:]]\+ms\|[[:digit:]]\+s[[:digit:]]\+ms\).*' + + parse_metric_from_logcat "Fully_drawn_ms" "$pattern" "$re_pattern" +fi + +# also call into package-specific scripts if there are additional metrics +if [[ -x "$DIR/metrics/$package" ]]; then + source "$DIR/metrics/$package" "$timestamp" +else + verbose_print parse_metrics: no per-package metrics script found at "$DIR/metrics/$package" +fi diff --git a/startop/scripts/app_startup/query_compiler_filter.py b/startop/scripts/app_startup/query_compiler_filter.py index dc97c6641f35..ea14264b4a1c 100755 --- a/startop/scripts/app_startup/query_compiler_filter.py +++ b/startop/scripts/app_startup/query_compiler_filter.py @@ -31,13 +31,15 @@ # import argparse -import sys +import os import re +import sys # TODO: refactor this with a common library file with analyze_metrics.py -import app_startup_runner -from app_startup_runner import _debug_print -from app_startup_runner import execute_arbitrary_command +DIR = os.path.abspath(os.path.dirname(__file__)) +sys.path.append(os.path.dirname(DIR)) +import lib.cmd_utils as cmd_utils +import lib.print_utils as print_utils from typing import List, NamedTuple, Iterable @@ -74,7 +76,11 @@ Dexopt state: x86: [status=quicken] [reason=install] """ %(package, package, package, package) - code, res = execute_arbitrary_command(['adb', 'shell', 'dumpsys', 'package', package], simulate=False, timeout=5) + code, res = cmd_utils.execute_arbitrary_command(['adb', 'shell', 'dumpsys', + 'package', package], + simulate=False, + timeout=5, + shell=False) if code: return res else: @@ -115,7 +121,7 @@ def parse_tab_subtree(label: str, str_lines: List[str], separator=' ', indent=-1 line = str_lines[line_num] current_indent = get_indent_level(line) - _debug_print("INDENT=%d, LINE=%s" %(current_indent, line)) + print_utils.debug_print("INDENT=%d, LINE=%s" %(current_indent, line)) current_label = line.lstrip() @@ -135,7 +141,7 @@ def parse_tab_subtree(label: str, str_lines: List[str], separator=' ', indent=-1 break new_remainder = str_lines[line_num::] - _debug_print("NEW REMAINDER: ", new_remainder) + print_utils.debug_print("NEW REMAINDER: ", new_remainder) parse_tree = ParseTree(label, children) return ParseResult(new_remainder, parse_tree) @@ -159,7 +165,7 @@ def parse_dexopt_state(dumpsys_tree: ParseTree) -> DexoptState: def find_first_compiler_filter(dexopt_state: DexoptState, package: str, instruction_set: str) -> str: lst = find_all_compiler_filters(dexopt_state, package) - _debug_print("all compiler filters: ", lst) + print_utils.debug_print("all compiler filters: ", lst) for compiler_filter_info in lst: if not instruction_set: @@ -180,10 
+186,10 @@ def find_all_compiler_filters(dexopt_state: DexoptState, package: str) -> List[C if not package_tree: raise AssertionError("Could not find any package subtree for package %s" %(package)) - _debug_print("package tree: ", package_tree) + print_utils.debug_print("package tree: ", package_tree) for path_tree in find_parse_children(package_tree, "path: "): - _debug_print("path tree: ", path_tree) + print_utils.debug_print("path tree: ", path_tree) matchre = re.compile("([^:]+):\s+\[status=([^\]]+)\]\s+\[reason=([^\]]+)\]") @@ -198,16 +204,16 @@ def find_all_compiler_filters(dexopt_state: DexoptState, package: str) -> List[C def main() -> int: opts = parse_options() - app_startup_runner._debug = opts.debug + cmd_utils._debug = opts.debug if _DEBUG_FORCE is not None: - app_startup_runner._debug = _DEBUG_FORCE - _debug_print("parsed options: ", opts) + cmd_utils._debug = _DEBUG_FORCE + print_utils.debug_print("parsed options: ", opts) # Note: This can often 'fail' if the package isn't actually installed. package_dumpsys = remote_dumpsys_package(opts.package, opts.simulate) - _debug_print("package dumpsys: ", package_dumpsys) + print_utils.debug_print("package dumpsys: ", package_dumpsys) dumpsys_parse_tree = parse_tab_tree(package_dumpsys, package_dumpsys) - _debug_print("parse tree: ", dumpsys_parse_tree) + print_utils.debug_print("parse tree: ", dumpsys_parse_tree) dexopt_state = parse_dexopt_state(dumpsys_parse_tree) filter = find_first_compiler_filter(dexopt_state, opts.package, opts.instruction_set) diff --git a/startop/scripts/app_startup/run_app_with_prefetch b/startop/scripts/app_startup/run_app_with_prefetch index 56bffa85eb90..31f625334b1e 100755 --- a/startop/scripts/app_startup/run_app_with_prefetch +++ b/startop/scripts/app_startup/run_app_with_prefetch @@ -24,7 +24,7 @@ Usage: run_app_with_prefetch --package <name> [OPTIONS]... -v, --verbose enable extra verbose printing -i, --input <file> trace file protobuf (default 'TraceFile.pb') -r, --readahead <mode> cold, warm, fadvise, mlock (default 'warm') - -w, --when <when> aot or jit (default 'aot') + -w, --when <when> aot or jit (default 'jit') -c, --count <count> how many times to run (default 1) -s, --sleep <sec> how long to sleep after readahead -t, --timeout <sec> how many seconds to timeout in between each app run (default 10) @@ -33,8 +33,9 @@ EOF } DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "$DIR/lib/common" +source "$DIR/../iorap/common" +report_fully_drawn="n" needs_trace_file="n" input_file="" package="" @@ -43,7 +44,7 @@ count=2 sleep_time=2 timeout=10 output="" # stdout by default -when="aot" +when="jit" parse_arguments() { while [[ $# -gt 0 ]]; do case "$1" in @@ -70,6 +71,10 @@ parse_arguments() { mode="$2" shift ;; + -rfd|--reportfullydrawn) + report_fully_drawn="y" + shift + ;; -c|--count) count="$2" ((count+=1)) @@ -101,6 +106,15 @@ parse_arguments() { esac shift done + + if [[ $when == "aot" ]]; then + # TODO: re-implement aot later for experimenting. + echo "Error: --when $when is unsupported" >&2 + exit 1 + elif [[ $when != "jit" ]]; then + echo "Error: --when must be one of (aot jit)." >&2 + exit 1 + fi } echo_to_output_file() { @@ -176,16 +190,19 @@ if [[ $? -ne 0 ]]; then fi verbose_print "Package was in path '$package_path'" -keep_application_trace_file=n application_trace_file_path="$package_path/TraceFile.pb" trace_file_directory="$package_path" if [[ $needs_trace_file == y ]]; then # system server always passes down the package path in a hardcoded spot. 
if [[ $when == "jit" ]]; then - verbose_print adb push "$input_file" "$application_trace_file_path" - adb push "$input_file" "$application_trace_file_path" + if ! iorapd_compiler_install_trace_file "$package" "$activity" "$input_file"; then + echo "Error: Failed to install compiled TraceFile.pb for '$package/$activity'" >&2 + exit 1 + fi keep_application_trace_file="y" else + echo "TODO: --readahead=aot is non-functional and needs to be fixed." >&2 + exit 1 # otherwise use a temporary directory to get normal non-jit behavior. trace_file_directory="/data/local/tmp/prefetch/$package" adb shell mkdir -p "$trace_file_directory" @@ -198,7 +215,7 @@ fi # otherwise system server activity hints will kick in # and the new just-in-time app pre-warmup will happen. if [[ $keep_application_trace_file == "n" ]]; then - adb shell "[[ -f '$application_trace_file_path' ]] && rm '$application_trace_file_path'" + iorapd_compiler_purge_trace_file "$package" "$activity" fi # Perform AOT readahead/pinning/etc when an application is about to be launched. @@ -209,6 +226,12 @@ perform_aot() { local the_when="$1" # user: aot, jit local the_mode="$2" # warm, cold, fadvise, mlock, etc. + # iorapd readahead for jit+(mlock/fadvise) + if [[ $the_when == "jit" && $the_mode != 'warm' && $the_mode != 'cold' ]]; then + iorapd_readahead_enable + return 0 + fi + if [[ $the_when != "aot" ]]; then # TODO: just in time implementation.. should probably use system server. return 0 @@ -242,12 +265,25 @@ perform_aot() { fi } -perform_aot_cleanup() { +# Perform cleanup at the end of each loop iteration. +perform_post_launch_cleanup() { local the_when="$1" # user: aot, jit local the_mode="$2" # warm, cold, fadvise, mlock, etc. + local logcat_timestamp="$3" # timestamp from before am start. + local res if [[ $the_when != "aot" ]]; then - # TODO: just in time implementation.. should probably use system server. + if [[ $the_mode != 'warm' && $the_mode != 'cold' ]]; then + # Validate that readahead completes. + # If this fails for some reason, then this will also discard the timing of the run. + iorapd_readahead_wait_until_finished "$package" "$activity" "$logcat_timestamp" "$timeout" + res=$? + + iorapd_readahead_disable + + return $res + fi + # Don't need to do anything for warm or cold. return 0 fi @@ -306,12 +342,87 @@ configure_compiler_filter() { # No-op if this option was not passed in. configure_compiler_filter "$compiler_filter" "$package" "$activity" || exit 1 +# convert 'a=b\nc=d\ne=f\n...' into 'b,d,f,...' +parse_metrics_output_string() { + # single string with newlines in it. + local input="$1" + + local metric_name + local metric_value + local rest + + local all_metrics=() + + # (n1=v1 n2=v2 n3=v3 ...) + readarray -t all_metrics <<< "$input" + + local kv_pair=() + local i + + for i in "${all_metrics[@]}" + do + verbose_print "parse_metrics_output: element '$i'" + # name=value + + IFS='=' read -r metric_name metric_value rest <<< "$i" + + verbose_print "parse_metrics_output: metric_value '$metric_value'" + + # (value1 value2 value3 ...) + all_metrics+=(${metric_value}) + done + + # "value1,value2,value3,..." + join_by ',' "${all_metrics[@]}" +} + +# convert 'a=b\nc=d\ne=f\n... into b,d,f,...' 
+parse_metrics_output() { + local metric_name + local metric_value + local rest + + local all_metrics=() + + while IFS='=' read -r metric_name metric_value rest; do + verbose_print "metric: $metric_name, value: $metric_value; rest: $rest" + all_metrics+=($metric_value) + done + + join_by ',' "${all_metrics[@]}" +} + +# convert 'a=b\nc=d\ne=f\n... into b,d,f,...' +parse_metrics_header() { + local metric_name + local metric_value + local rest + + local all_metrics=() + + while IFS='=' read -r metric_name metric_value rest; do + verbose_print "metric: $metric_name, value: $metric_value; rest: $rest" + all_metrics+=($metric_name) + done + + join_by ',' "${all_metrics[@]}" +} + +if [[ $report_fully_drawn == y ]]; then + metrics_header="$("$DIR/parse_metrics" --package "$package" --activity "$activity" --simulate --reportfullydrawn | parse_metrics_header)" +else + metrics_header="$("$DIR/parse_metrics" --package "$package" --activity "$activity" --simulate | parse_metrics_header)" +fi + # TODO: This loop logic could probably be moved into app_startup_runner.py for ((i=0;i<count;++i)) do verbose_print "==========================================" verbose_print "==== ITERATION $i ====" verbose_print "==========================================" if [[ $mode != "warm" ]]; then + # The package must be killed **before** we drop caches, otherwise pages will stay resident. + verbose_print "Kill package for non-warm start." + remote_pkill "$package" verbose_print "Drop caches for non-warm start." # Drop all caches to get cold starts. adb shell "echo 3 > /proc/sys/vm/drop_caches" @@ -321,8 +432,15 @@ for ((i=0;i<count;++i)) do verbose_print "Running with timeout $timeout" + pre_launch_timestamp="$(logcat_save_timestamp)" + # TODO: multiple metrics output. - total_time="$(timeout $timeout $DIR/launch_application "$package" "$activity")" + +if [[ $report_fully_drawn == y ]]; then + total_time="$(timeout $timeout "$DIR/launch_application" "$package" "$activity" | "$DIR/parse_metrics" --package "$package" --activity "$activity" --timestamp "$pre_launch_timestamp" --reportfullydrawn | parse_metrics_output)" +else + total_time="$(timeout $timeout "$DIR/launch_application" "$package" "$activity" | "$DIR/parse_metrics" --package "$package" --activity "$activity" --timestamp "$pre_launch_timestamp" | parse_metrics_output)" +fi if [[ $? -ne 0 ]]; then echo "WARNING: Skip bad result, try iteration again." >&2 @@ -330,33 +448,38 @@ for ((i=0;i<count;++i)) do continue fi - perform_aot_cleanup "$when" "$mode" + perform_post_launch_cleanup "$when" "$mode" "$pre_launch_timestamp" + + if [[ $? -ne 0 ]]; then + echo "WARNING: Skip bad cleanup, try iteration again." >&2 + ((i=i-1)) + continue + fi echo "Iteration $i. Total time was: $total_time" - timings_array+=($total_time) + timings_array+=("$total_time") done # drop the first result which is usually garbage. timings_array=("${timings_array[@]:1}") +# Print the CSV header first. +echo_to_output_file "$metrics_header" # Print out interactive/debugging timings and averages. # Other scripts should use the --output flag and parse the CSV. for tim in "${timings_array[@]}"; do - echo_to_output_file -ne "$tim," + echo_to_output_file "$tim" done -echo_to_output_file "" -average_string=$(echo "${timings_array[@]}" | awk '{s+=$0}END{print "Average:",s/NR}' RS=" ") -echo -ne ${average_string}. 
if [[ x$output != x ]]; then echo " Saved results to '$output'" fi -# Temporary hack around multiple activities being launched with different package paths (for same app): -# Clean up all left-over TraceFile.pb -adb shell 'for i in $(find /data/app -name TraceFile.pb); do rm \$i; done' +if [[ $needs_trace_file == y ]] ; then + iorapd_compiler_purge_trace_file "$package" "$activity" +fi # Kill the process to ensure AM isn't keeping it around. remote_pkill "$package" diff --git a/startop/scripts/app_startup/run_app_with_prefetch.py b/startop/scripts/app_startup/run_app_with_prefetch.py new file mode 100755 index 000000000000..2f1eff2c41f6 --- /dev/null +++ b/startop/scripts/app_startup/run_app_with_prefetch.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +# +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Runner of one test given a setting. + +Run app and gather the measurement in a certain configuration. +Print the result to stdout. +See --help for more details. + +Sample usage: + $> ./python run_app_with_prefetch.py -p com.android.settings -a + com.android.settings.Settings -r fadvise -i input + +""" + +import argparse +import os +import sys +import time +from typing import List, Tuple, Optional + +# local imports +import lib.adb_utils as adb_utils +from lib.app_runner import AppRunner, AppRunnerListener + +# global variables +DIR = os.path.abspath(os.path.dirname(__file__)) + +sys.path.append(os.path.dirname(DIR)) +import lib.print_utils as print_utils +import lib.cmd_utils as cmd_utils +import iorap.lib.iorapd_utils as iorapd_utils + +class PrefetchAppRunner(AppRunnerListener): + def __init__(self, + package: str, + activity: Optional[str], + readahead: str, + compiler_filter: Optional[str], + timeout: Optional[int], + simulate: bool, + debug: bool, + input:Optional[str], + **kwargs): + self.app_runner = AppRunner(package, + activity, + compiler_filter, + timeout, + simulate) + self.app_runner.add_callbacks(self) + + self.simulate = simulate + self.readahead = readahead + self.debug = debug + self.input = input + print_utils.DEBUG = self.debug + cmd_utils.SIMULATE = self.simulate + + + def run(self) -> Optional[List[Tuple[str]]]: + """Runs an app. + + Returns: + A list of (metric, value) tuples. + """ + return self.app_runner.run() + + def preprocess(self): + passed = self.validate_options() + if not passed: + return + + # Sets up adb environment. + adb_utils.root() + adb_utils.disable_selinux() + time.sleep(1) + + # Kill any existing process of this app + adb_utils.pkill(self.app_runner.package) + + if self.readahead != 'warm': + print_utils.debug_print('Drop caches for non-warm start.') + # Drop all caches to get cold starts. 
+      adb_utils.vm_drop_cache()
+
+    if self.readahead != 'warm' and self.readahead != 'cold':
+      iorapd_utils.enable_iorapd_readahead()
+
+  def postprocess(self, pre_launch_timestamp: str):
+    passed = self._perform_post_launch_cleanup(pre_launch_timestamp)
+    if not passed and not self.app_runner.simulate:
+      print_utils.error_print('Cannot perform post launch cleanup!')
+      return None
+
+    # Kill any existing process of this app
+    adb_utils.pkill(self.app_runner.package)
+
+  def _perform_post_launch_cleanup(self, logcat_timestamp: str) -> bool:
+    """Performs cleanup at the end of each loop iteration.
+
+    Returns:
+      A bool indicating whether the cleanup succeeded.
+    """
+    if self.readahead != 'warm' and self.readahead != 'cold':
+      passed = iorapd_utils.wait_for_iorapd_finish(self.app_runner.package,
+                                                   self.app_runner.activity,
+                                                   self.app_runner.timeout,
+                                                   self.debug,
+                                                   logcat_timestamp)
+
+      if not passed:
+        return passed
+
+      return iorapd_utils.disable_iorapd_readahead()
+
+    # Don't need to do anything for warm or cold.
+    return True
+
+  def metrics_selector(self, am_start_output: str,
+                       pre_launch_timestamp: str) -> str:
+    """Parses the metric after app startup by reading from logcat in a blocking
+    manner until all metrics have been found.
+
+    Returns:
+      the total time and displayed time of app startup.
+      For example: "TotalTime=123\nDisplayedTime=121"
+    """
+    total_time = AppRunner.parse_total_time(am_start_output)
+    displayed_time = adb_utils.blocking_wait_for_logcat_displayed_time(
+        pre_launch_timestamp, self.app_runner.package, self.app_runner.timeout)
+
+    return 'TotalTime={}\nDisplayedTime={}'.format(total_time, displayed_time)
+
+  def validate_options(self) -> bool:
+    """Validates the activity and trace file if needed.
+
+    Returns:
+      A bool indicating whether the activity is valid.
+    """
+    needs_trace_file = self.readahead != 'cold' and self.readahead != 'warm'
+    if needs_trace_file and (self.input is None or
+                             not os.path.exists(self.input)):
+      print_utils.error_print('--input not specified!')
+      return False
+
+    # Install necessary trace file. This must be after the activity checking.
+    if needs_trace_file:
+      passed = iorapd_utils.iorapd_compiler_install_trace_file(
+          self.app_runner.package, self.app_runner.activity, self.input)
+      if not cmd_utils.SIMULATE and not passed:
+        print_utils.error_print('Failed to install compiled TraceFile.pb for '
+                                '"{}/{}"'.
+                                format(self.app_runner.package,
+                                       self.app_runner.activity))
+        return False
+
+    return True
+
+
+
+def parse_options(argv: List[str] = None):
+  """Parses command line arguments and returns an argparse Namespace object."""
+  parser = argparse.ArgumentParser(
+      description='Run an Android application once and measure startup time.'
+  )
+
+  required_named = parser.add_argument_group('required named arguments')
+  required_named.add_argument('-p', '--package', action='store', dest='package',
+                              help='package of the application', required=True)
+
+  # optional arguments
+  # use a group here to get the required arguments to appear 'above' the
+  # optional arguments in help.
+ optional_named = parser.add_argument_group('optional named arguments') + optional_named.add_argument('-a', '--activity', action='store', + dest='activity', + help='launch activity of the application') + optional_named.add_argument('-s', '--simulate', dest='simulate', + action='store_true', + help='simulate the process without executing ' + 'any shell commands') + optional_named.add_argument('-d', '--debug', dest='debug', + action='store_true', + help='Add extra debugging output') + optional_named.add_argument('-i', '--input', action='store', dest='input', + help='perfetto trace file protobuf', + default='TraceFile.pb') + optional_named.add_argument('-r', '--readahead', action='store', + dest='readahead', + help='which readahead mode to use', + default='cold', + choices=('warm', 'cold', 'mlock', 'fadvise')) + optional_named.add_argument('-t', '--timeout', dest='timeout', action='store', + type=int, + help='Timeout after this many seconds when ' + 'executing a single run.', + default=10) + optional_named.add_argument('--compiler-filter', dest='compiler_filter', + action='store', + help='Which compiler filter to use.', + default=None) + + return parser.parse_args(argv) + +def main(): + opts = parse_options() + runner = PrefetchAppRunner(**vars(opts)) + result = runner.run() + + if result is None: + return 1 + + print(result) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/startop/scripts/app_startup/run_app_with_prefetch_test.py b/startop/scripts/app_startup/run_app_with_prefetch_test.py new file mode 100644 index 000000000000..8a588e4463e9 --- /dev/null +++ b/startop/scripts/app_startup/run_app_with_prefetch_test.py @@ -0,0 +1,286 @@ +#!/usr/bin/env python3 +# +# Copyright 2019, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Unit tests for the run_app_with_prefetch_test.py script. 
+ +Install: + $> sudo apt-get install python3-pytest ## OR + $> pip install -U pytest +See also https://docs.pytest.org/en/latest/getting-started.html + +Usage: + $> ./run_app_with_prefetch_test.py + $> pytest run_app_with_prefetch_test.py + $> python -m pytest run_app_with_prefetch_test.py + +See also https://docs.pytest.org/en/latest/usage.html +""" + +import io +import os +import shlex +import sys +import tempfile +# global imports +from contextlib import contextmanager + +# pip imports +import pytest +# local imports +import run_app_with_prefetch as runner +from mock import call, patch, Mock + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from app_startup.lib.app_runner import AppRunner +# +# Argument Parsing Helpers +# + +@contextmanager +def ignore_stdout_stderr(): + """Ignore stdout/stderr output for duration of this context.""" + old_stdout = sys.stdout + old_stderr = sys.stderr + sys.stdout = io.StringIO() + sys.stderr = io.StringIO() + try: + yield + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + +@contextmanager +def argparse_bad_argument(msg): + """Asserts that a SystemExit is raised when executing this context. + + If the assertion fails, print the message 'msg'. + """ + with pytest.raises(SystemExit, message=msg): + with ignore_stdout_stderr(): + yield + +def assert_bad_argument(args, msg): + """Asserts that the command line arguments in 'args' are malformed. + + Prints 'msg' if the assertion fails. + """ + with argparse_bad_argument(msg): + parse_args(args) + +def parse_args(args): + """ + :param args: command-line like arguments as a single string + :return: dictionary of parsed key/values + """ + # "-a b -c d" => ['-a', 'b', '-c', 'd'] + return vars(runner.parse_options(shlex.split(args))) + +def default_dict_for_parsed_args(**kwargs): + """Combines it with all of the "optional" parameters' default values.""" + d = { + 'readahead': 'cold', + 'simulate': None, + 'simulate': False, + 'debug': False, + 'input': 'TraceFile.pb', + 'timeout': 10, + 'compiler_filter': None, + 'activity': None + } + d.update(kwargs) + return d + +def default_mock_dict_for_parsed_args(include_optional=True, **kwargs): + """Combines default dict with all optional parameters with some mock required + parameters. + """ + d = {'package': 'com.fake.package'} + if include_optional: + d.update(default_dict_for_parsed_args()) + d.update(kwargs) + return d + +def parse_optional_args(str): + """ + Parses an argument string which already includes all the required arguments + in default_mock_dict_for_parsed_args. + """ + req = '--package com.fake.package' + return parse_args('%s %s' % (req, str)) + +def test_argparse(): + # missing arguments + assert_bad_argument('', '-p are required') + + # required arguments are parsed correctly + ad = default_dict_for_parsed_args # assert dict + assert parse_args('--package xyz') == ad(package='xyz') + + assert parse_args('-p xyz') == ad(package='xyz') + + assert parse_args('-p xyz -s') == ad(package='xyz', simulate=True) + assert parse_args('-p xyz --simulate') == ad(package='xyz', simulate=True) + + # optional arguments are parsed correctly. 
+ mad = default_mock_dict_for_parsed_args # mock assert dict + assert parse_optional_args('--input trace.pb') == mad(input='trace.pb') + + assert parse_optional_args('--compiler-filter speed') == \ + mad(compiler_filter='speed') + + assert parse_optional_args('-d') == mad(debug=True) + assert parse_optional_args('--debug') == mad(debug=True) + + assert parse_optional_args('--timeout 123') == mad(timeout=123) + assert parse_optional_args('-t 456') == mad(timeout=456) + + assert parse_optional_args('-r warm') == mad(readahead='warm') + assert parse_optional_args('--readahead warm') == mad(readahead='warm') + + assert parse_optional_args('-a act') == mad(activity='act') + assert parse_optional_args('--activity act') == mad(activity='act') + +def test_main(): + args = '--package com.fake.package --activity act -s' + opts = runner.parse_options(shlex.split(args)) + result = runner.PrefetchAppRunner(**vars(opts)).run() + assert result == [('TotalTime', '123')] + +def _mocked_run_shell_command(*args, **kwargs): + if args[0] == 'adb shell ps | grep "music" | awk \'{print $2;}\'': + return (True, '9999') + else: + return (True, '') + +def test_preprocess_no_cache_drop(): + with patch('lib.cmd_utils.run_shell_command', + new_callable=Mock) as mock_run_shell_command: + mock_run_shell_command.side_effect = _mocked_run_shell_command + prefetch_app_runner = runner.PrefetchAppRunner(package='music', + activity='MainActivity', + readahead='warm', + compiler_filter=None, + timeout=None, + simulate=False, + debug=False, + input=None) + + prefetch_app_runner.preprocess() + + calls = [call('adb root'), + call('adb shell "getenforce"'), + call('adb shell "setenforce 0"'), + call('adb shell "stop"'), + call('adb shell "start"'), + call('adb wait-for-device'), + call('adb shell ps | grep "music" | awk \'{print $2;}\''), + call('adb shell "kill 9999"')] + mock_run_shell_command.assert_has_calls(calls) + +def test_preprocess_with_cache_drop(): + with patch('lib.cmd_utils.run_shell_command', + new_callable=Mock) as mock_run_shell_command: + mock_run_shell_command.side_effect = _mocked_run_shell_command + prefetch_app_runner = runner.PrefetchAppRunner(package='music', + activity='MainActivity', + readahead='cold', + compiler_filter=None, + timeout=None, + simulate=False, + debug=False, + input=None) + + prefetch_app_runner.preprocess() + + calls = [call('adb root'), + call('adb shell "getenforce"'), + call('adb shell "setenforce 0"'), + call('adb shell "stop"'), + call('adb shell "start"'), + call('adb wait-for-device'), + call('adb shell ps | grep "music" | awk \'{print $2;}\''), + call('adb shell "kill 9999"'), + call('adb shell "echo 3 > /proc/sys/vm/drop_caches"')] + mock_run_shell_command.assert_has_calls(calls) + +def test_preprocess_with_cache_drop_and_iorapd_enabled(): + with patch('lib.cmd_utils.run_shell_command', + new_callable=Mock) as mock_run_shell_command: + mock_run_shell_command.side_effect = _mocked_run_shell_command + + with tempfile.NamedTemporaryFile() as input: + prefetch_app_runner = runner.PrefetchAppRunner(package='music', + activity='MainActivity', + readahead='fadvise', + compiler_filter=None, + timeout=None, + simulate=False, + debug=False, + input=input.name) + + prefetch_app_runner.preprocess() + + calls = [call('adb root'), + call('adb shell "getenforce"'), + call('adb shell "setenforce 0"'), + call('adb shell "stop"'), + call('adb shell "start"'), + call('adb wait-for-device'), + call( + 'adb shell ps | grep "music" | awk \'{print $2;}\''), + call('adb shell "kill 9999"'), + call('adb 
shell "echo 3 > /proc/sys/vm/drop_caches"'), + call('bash -c "source {}; iorapd_readahead_enable"'. + format(AppRunner.IORAP_COMMON_BASH_SCRIPT))] + mock_run_shell_command.assert_has_calls(calls) + +@patch('lib.adb_utils.blocking_wait_for_logcat_displayed_time') +@patch('lib.cmd_utils.run_shell_command') +def test_postprocess_with_launch_cleanup( + mock_run_shell_command, + mock_blocking_wait_for_logcat_displayed_time): + mock_run_shell_command.side_effect = _mocked_run_shell_command + mock_blocking_wait_for_logcat_displayed_time.return_value = 123 + + with tempfile.NamedTemporaryFile() as input: + prefetch_app_runner = runner.PrefetchAppRunner(package='music', + activity='MainActivity', + readahead='fadvise', + compiler_filter=None, + timeout=10, + simulate=False, + debug=False, + input=input.name) + + prefetch_app_runner.postprocess('2019-07-02 23:20:06.972674825') + + calls = [ + call('bash -c "source {script_path}; ' + 'iorapd_readahead_wait_until_finished ' + '\'{package}\' \'{activity}\' \'{timestamp}\' \'{timeout}\'"'. + format(timeout=10, + package='music', + activity='MainActivity', + timestamp='2019-07-02 23:20:06.972674825', + script_path=AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call('bash -c "source {}; iorapd_readahead_disable"'. + format(AppRunner.IORAP_COMMON_BASH_SCRIPT)), + call('adb shell ps | grep "music" | awk \'{print $2;}\''), + call('adb shell "kill 9999"')] + mock_run_shell_command.assert_has_calls(calls) + +if __name__ == '__main__': + pytest.main() |