Diffstat (limited to 'startop/scripts/app_startup')
-rwxr-xr-x  startop/scripts/app_startup/analyze_metrics.py           457
-rwxr-xr-x  startop/scripts/app_startup/app_startup_runner.py        322
-rwxr-xr-x  startop/scripts/app_startup/app_startup_runner_test.py   210
-rwxr-xr-x  startop/scripts/app_startup/launch_application            41
-rwxr-xr-x  startop/scripts/app_startup/lib/common                    14
-rwxr-xr-x  startop/scripts/app_startup/run_app_with_prefetch        344
-rwxr-xr-x  startop/scripts/app_startup/unlock_screen                 22
7 files changed, 1410 insertions, 0 deletions
diff --git a/startop/scripts/app_startup/analyze_metrics.py b/startop/scripts/app_startup/analyze_metrics.py
new file mode 100755
index 000000000000..d74d6f68d823
--- /dev/null
+++ b/startop/scripts/app_startup/analyze_metrics.py
@@ -0,0 +1,457 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Perform statistical analysis on measurements produced by app_startup_runner.py
+
+Install:
+$> sudo apt-get install python3-scipy
+
+Usage:
+$> ./analyze_metrics.py <filename.csv> [<filename2.csv> ...]
+$> ./analyze_metrics.py --help
+"""
+
+import argparse
+import csv
+import itertools
+import sys
+from typing import Any, Iterable, List, TextIO, Tuple
+
+from scipy import stats as sc
+import numpy as np
+
+
+# These CSV columns are considered labels. Everything after them in the same row is a metric.
+_LABEL_COLUMNS=['packages', 'readaheads', 'compiler_filters']
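+# Example row (labels first, then one or more metric samples), e.g.:
+#   com.google.android.calculator,cold,speed,1218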
+# The metric series with the 'cold' readahead is the baseline.
+# All others (warm, jit, etc) are the potential improvements.
+
+# FIXME: this should probably be an option
+_BASELINE=('readaheads', 'cold')
+# Ignore this pair in some of the global statistics calculations.
+_IGNORE_PAIR=('readaheads', 'warm')
+_PLOT_SUBKEY='readaheads'
+_PLOT_GROUPKEY='packages'
+_PLOT_DATA_INDEX = 0
+_DELTA=50
+_DELTA2=100
+_PVALUE_THRESHOLD=0.10
+_debug = False # See -d/--debug flag.
+
+def parse_options(argv: List[str] = None):
+ """Parse command line arguments and return an argparse Namespace object."""
+  parser = argparse.ArgumentParser(description="Perform statistical analysis on measurements produced by app_startup_runner.py.")
+ parser.add_argument('input_files', metavar='file.csv', nargs='+', help='CSV file produced by app_startup_runner.py')
+
+ parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='Add extra debugging output')
+ parser.add_argument('-os', '--output-samples', dest='output_samples', default='/dev/null', action='store', help='Store CSV for per-sample data')
+ parser.add_argument('-oc', '--output-comparable', dest='output_comparable', default='/dev/null', action='store', help='Output CSV for comparable against baseline')
+ parser.add_argument('-ocs', '--output-comparable-significant', dest='output_comparable_significant', default='/dev/null', action='store', help='Output CSV for comparable against baseline (significant only)')
+ parser.add_argument('-pt', '--pvalue-threshold', dest='pvalue_threshold', type=float, default=_PVALUE_THRESHOLD, action='store')
+ parser.add_argument('-dt', '--delta-threshold', dest='delta_threshold', type=int, default=_DELTA, action='store')
+
+ return parser.parse_args(argv)
+
+def _debug_print(*args, **kwargs):
+ """Print the args to sys.stderr if the --debug/-d flag was passed in."""
+ global _debug
+ if _debug:
+ print(*args, **kwargs, file=sys.stderr)
+
+def _expand_gen_repr(args):
+  """Like repr but any generator-like object has its iterator consumed
+  and then called repr on."""
+ new_args_list = []
+ for i in args:
+ # detect iterable objects that do not have their own override of __str__
+ if hasattr(i, '__iter__'):
+ to_str = getattr(i, '__str__')
+ if to_str.__objclass__ == object:
+ # the repr for a generator is just type+address, expand it out instead.
+ new_args_list.append([_expand_gen_repr([j])[0] for j in i])
+ continue
+ # normal case: uses the built-in to-string
+ new_args_list.append(i)
+ return new_args_list
+
+def _debug_print_gen(*args, **kwargs):
+ """Like _debug_print but will turn any iterable args into a list."""
+ if not _debug:
+ return
+
+ new_args_list = _expand_gen_repr(args)
+ _debug_print(*new_args_list, **kwargs)
+
+def read_headers(input_file: TextIO) -> Tuple[List[str], List[str]]:
+ _debug_print("read_headers for file: ", input_file.name)
+ csv_reader = csv.reader(input_file)
+
+ label_num_columns = len(_LABEL_COLUMNS)
+
+ try:
+ header = next(csv_reader)
+ except StopIteration:
+ header = None
+ _debug_print('header', header)
+
+ if not header:
+ return (None, None)
+
+ labels = header[0:label_num_columns]
+ data = header[label_num_columns:]
+
+ return (labels, data)
+
+def read_labels_and_data(input_file: TextIO) -> Iterable[Tuple[List[str], List[int]]]:
+ _debug_print("print_analysis for file: ", input_file.name)
+ csv_reader = csv.reader(input_file)
+
+ # Skip the header because it doesn't contain any data.
+ # To get the header see read_headers function.
+ try:
+ header = next(csv_reader)
+ except StopIteration:
+ header = None
+
+ label_num_columns = len(_LABEL_COLUMNS)
+
+ for row in csv_reader:
+    if len(row) > 0 and row[0].startswith(';'):
+ _debug_print("skip comment line", row)
+ continue
+
+ labels = row[0:label_num_columns]
+ data = [int(i) for i in row[label_num_columns:]]
+
+# _debug_print("labels:", labels)
+# _debug_print("data:", data)
+
+ yield (labels, data)
+
+def group_metrics_by_label(it: Iterable[Tuple[List[str], List[int]]]):
+ prev_labels = None
+ data_2d = []
+
+ for label_list, data_list in it:
+ if prev_labels != label_list:
+ if prev_labels:
+# _debug_print("grouped labels:", prev_labels, "data_2d:", data_2d)
+ yield (prev_labels, data_2d)
+ data_2d = []
+
+ data_2d.append(data_list)
+ prev_labels = label_list
+
+ if prev_labels:
+# _debug_print("grouped labels:", prev_labels, "data_2d:", data_2d)
+ yield (prev_labels, data_2d)
+
+def data_to_numpy(it: Iterable[Tuple[List[str], List[List[int]]]]) -> Iterable[Tuple[List[str], Any]]:
+ for label_list, data_2d in it:
+ yield (label_list, np.asarray(data_2d, dtype=int))
+
+def iterate_columns(np_data_2d):
+ for col in range(np_data_2d.shape[1]):
+ col_as_array = np_data_2d[:, col]
+ yield col_as_array
+
+def confidence_interval(np_data_2d, percent=0.95):
+ """
+  Given some data [[a,b,c],[d,e,f],...]
+
+  We assume each column holds repeated samples of the same metric (e.g. [a,d])
+  and each row holds one sample of every metric (e.g. [a,b,c]).
+
+  We then calculate the CI for each metric individually, returning it as a list of tuples.
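+
+  For example (hypothetical samples of a single metric):
+    confidence_interval(np.asarray([[10], [12], [14]]))
+    # -> approximately [(10.15, 13.85)]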
+ """
+ arr = []
+ for col_2d in iterate_columns(np_data_2d):
+ mean = col_2d.mean()
+ sigma = col_2d.std()
+
+ ci = sc.norm.interval(percent, loc=mean, scale=sigma / np.sqrt(len(col_2d)))
+ arr.append(ci)
+
+ # TODO: This seems to be returning NaN when all the samples have the same exact value
+ # (e.g. stddev=0, which can trivially happen when sample count = 1).
+
+ return arr
+
+def print_analysis(it, label_header: List[str], data_header: List[str], output_samples: str):
+ print(label_header)
+
+ with open(output_samples, "w") as output_file:
+
+ csv_writer = csv.writer(output_file)
+ csv_writer.writerow(label_header + ['mean', 'std', 'confidence_interval_a', 'confidence_interval_b'])
+
+ for label_list, np_data_2d in it:
+ print("**********************")
+ print(label_list)
+ print()
+ print(" ", data_header)
+ # aggregate computation column-wise
+ print("Mean: ", np_data_2d.mean(axis=0))
+ print("Std: ", np_data_2d.std(axis=0))
+ print("CI95%:", confidence_interval(np_data_2d))
+ print("SEM: ", stats_standard_error_one(np_data_2d, axis=0))
+
+ #ci = confidence_interval(np_data_2d)[_PLOT_DATA_INDEX]
+ sem = stats_standard_error_one(np_data_2d, axis=0)[_PLOT_DATA_INDEX]
+ mean = np_data_2d.mean(axis=0)[_PLOT_DATA_INDEX]
+
+ ci = (mean - sem, mean + sem)
+
+ csv_writer.writerow(label_list + [mean, np_data_2d.std(axis=0)[_PLOT_DATA_INDEX], ci[0], ci[1]])
+
+def from_file_group_by_labels(input_file):
+ (label_header, data_header) = read_headers(input_file)
+ label_data_iter = read_labels_and_data(input_file)
+ grouped_iter = group_metrics_by_label(label_data_iter)
+ grouped_numpy_iter = data_to_numpy(grouped_iter)
+
+ return grouped_numpy_iter, label_header, data_header
+
+def list_without_index(list, index):
+ return list[:index] + list[index+1:]
+
+def group_by_without_baseline_key(grouped_numpy_iter, label_header):
+ """
+ Data is considered comparable if the only difference is the baseline key
+ (i.e. the readahead is different but the package, compilation filter, etc, are the same).
+
+ Returns iterator that's grouped by the non-baseline labels to an iterator of
+ (label_list, data_2d).
+ """
+ baseline_index = label_header.index(_BASELINE[0])
+
+ def get_label_without_baseline(tpl):
+ label_list, _ = tpl
+ return list_without_index(label_list, baseline_index)
+ # [['pkgname', 'compfilter', 'warm'], [data]]
+ # [['pkgname', 'compfilter', 'cold'], [data2]]
+ # [['pkgname2', 'compfilter', 'warm'], [data3]]
+ #
+ # ->
+ # ( [['pkgname', 'compfilter', 'warm'], [data]] # ignore baseline label change.
+ # [['pkgname', 'compfilter', 'cold'], [data2]] ), # split here because the pkgname changed.
+ # ( [['pkgname2', 'compfilter', 'warm'], [data3]] )
+ for group_info, it in itertools.groupby(grouped_numpy_iter, key = get_label_without_baseline):
+ yield it
+
+ # TODO: replace this messy manual iteration/grouping with pandas
+
+def iterate_comparable_metrics(without_baseline_iter, label_header):
+ baseline_index = label_header.index(_BASELINE[0])
+ baseline_value = _BASELINE[1]
+
+ _debug_print("iterate comparables")
+
+ def is_baseline_fun(tp):
+ ll, dat = tp
+ return ll[baseline_index] == baseline_value
+
+ # iterating here when everything but the baseline key is the same.
+ for it in without_baseline_iter:
+ it1, it2 = itertools.tee(it)
+
+ # find all the baseline data.
+ baseline_filter_it = filter(is_baseline_fun, it1)
+
+ # find non-baseline data.
+ nonbaseline_filter_it = itertools.filterfalse(is_baseline_fun, it2)
+
+ yield itertools.product(baseline_filter_it, nonbaseline_filter_it)
+
+def stats_standard_error_one(a, axis):
+ a_std = a.std(axis=axis, ddof=0)
+ a_len = a.shape[axis]
+
+ return a_std / np.sqrt(a_len)
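+
+# e.g. (hypothetical): a column with std 4.0 over 16 samples has SEM 4.0 / sqrt(16) = 1.0.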
+
+def stats_standard_error(a, b, axis):
+ a_std = a.std(axis=axis, ddof=0)
+ b_std = b.std(axis=axis, ddof=0)
+
+ a_len = a.shape[axis]
+ b_len = b.shape[axis]
+
+ temp1 = a_std*a_std/a_len
+ temp2 = b_std*b_std/b_len
+
+ return np.sqrt(temp1 + temp2)
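+
+# e.g. (hypothetical): std 10 over 25 samples vs. std 20 over 100 samples gives
+# sqrt(10*10/25 + 20*20/100) = sqrt(8.0) ~= 2.83.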
+
+def stats_tvalue(a, b, axis, delta = 0):
+ a_mean = a.mean(axis=axis)
+ b_mean = b.mean(axis=axis)
+
+ return (a_mean - b_mean - delta) / stats_standard_error(a, b, axis)
+
+def stats_pvalue(a, b, axis, delta, left:bool = False):
+ """
+ Single-tailed 2-sample t-test.
+
+  Returns the p-value for the one-sided alternative: mean(a) - mean(b) > delta.
+ :param a: numpy 2d array
+ :param b: numpy 2d array
+ :param axis: which axis to do the calculations across
+ :param delta: test value of mean differences
+  :param left: if true, test the left-tailed alternative mean(a) - mean(b) < delta instead
+ :return: p-value
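+
+  Example (hypothetical): if the samples in a average ~200, those in b average
+  ~100 and delta=50, a small returned p-value is evidence that
+  mean(a) - mean(b) > 50.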
+ """
+  # implement our own p-value calculation because the built-in t-test (t, p values)
+  # only offers delta=0, e.g. m1-m2 ? 0
+  # we are however interested in m1-m2 ? delta
+ t_value = stats_tvalue(a, b, axis, delta)
+
+ # 2-sample degrees of freedom is using the array sizes - 2.
+ dof = a.shape[axis] + b.shape[axis] - 2
+
+ if left:
+ # left tailed test. e.g. m1-m2 <= delta
+ return sc.t.cdf(t_value, dof)
+ else:
+ # right tailed test. e.g. m1-m2 >= delta
+ return sc.t.sf(t_value, dof)
+ # a left+right tailed test is a 2-tail t-test and can be done using ttest_ind for delta=0
+
+def print_comparable_analysis(comparable_metrics_iter, label_header, data_header, output_comparable: str, output_comparable_significant: str):
+ baseline_value = _BASELINE[1]
+ baseline_index = label_header.index(_BASELINE[0])
+
+ old_baseline_label_list = None
+ delta = _DELTA
+ filter_value = _IGNORE_PAIR[1]
+ filter_index = label_header.index(_IGNORE_PAIR[0])
+
+ pvalue_threshold = _PVALUE_THRESHOLD
+ ci_threshold = (1 - _PVALUE_THRESHOLD) * 100.0
+
+ with open(output_comparable, "w") as output_file:
+
+ csv_writer = csv.writer(output_file)
+ csv_writer.writerow(label_header + ['mean', 'mean_diff', 'sem', 'pvalue_2tailed', 'pvalue_gt%d' %(_DELTA), 'pvalue_gt%d' %(_DELTA2)])
+
+ print("------------------------------------------------------------------")
+ print("Comparison against the baseline %s = %s" %(_BASELINE, baseline_value))
+ print("--- Right-tailed t-test checks if the baseline >= current %s by at least %d" %(_BASELINE[0], delta))
+ print()
+
+ global_stats = {'better_than_delta': [], 'better_than_delta_p95': []}
+
+ for nested_it in comparable_metrics_iter:
+ print("************************")
+
+ better_than_delta = []
+ better_than_delta_p95 = []
+
+ saw_baseline_once = False
+
+ for ((baseline_label_list, baseline_np_data_2d), (rest_label_list, rest_np_data_2d)) in nested_it:
+ _debug_print("baseline_label_list:", baseline_label_list)
+ _debug_print("baseline_np_data_2d:", baseline_np_data_2d)
+ _debug_print("rest_label_list:", rest_label_list)
+ _debug_print("rest_np_data_2d:", rest_np_data_2d)
+
+ mean_diff = baseline_np_data_2d.mean(axis=0) - rest_np_data_2d.mean(axis=0)
+ # 2-sample 2-tailed t-test with delta=0
+ # e.g. "Is it true that usually the two sample means are different?"
+ t_statistic, t_pvalue = sc.ttest_ind(baseline_np_data_2d, rest_np_data_2d, axis=0)
+
+        # 2-sample 1-tailed t-test with delta=50
+        # e.g. "Is the baseline mean usually better than the other mean by at least 50ms?"
+ t2 = stats_tvalue(baseline_np_data_2d, rest_np_data_2d, axis=0, delta=delta)
+ p2 = stats_pvalue(baseline_np_data_2d, rest_np_data_2d, axis=0, delta=delta)
+
+ t2_b = stats_tvalue(baseline_np_data_2d, rest_np_data_2d, axis=0, delta=_DELTA2)
+ p2_b = stats_pvalue(baseline_np_data_2d, rest_np_data_2d, axis=0, delta=_DELTA2)
+
+ print("%s vs %s" %(rest_label_list, baseline_value))
+ print(" ", data_header)
+ print("Mean Difference: ", mean_diff)
+ print("T-test (2-tailed) != 0: t=%s, p=%s" %(t_statistic, t_pvalue))
+ print("T-test (right-tailed) >= %d: t=%s, p=%s" %(_DELTA, t2, p2))
+ print("T-test (right-tailed) >= %d: t=%s, p=%s" %(_DELTA2, t2_b, p2_b))
+
+ def write_out_values(label_list, *args):
+ csv_writer.writerow(label_list + [i[_PLOT_DATA_INDEX] for i in args])
+
+ sem = stats_standard_error(baseline_np_data_2d, rest_np_data_2d, axis=0)
+        if not saw_baseline_once:
+ saw_baseline_once = True
+ base_sem = stats_standard_error_one(baseline_np_data_2d, axis=0)
+ write_out_values(baseline_label_list, baseline_np_data_2d.mean(axis=0), [0], base_sem, [None], [None], [None])
+ write_out_values(rest_label_list, rest_np_data_2d.mean(axis=0), mean_diff, sem, t_pvalue, p2, p2_b)
+
+ # now do the global statistics aggregation
+
+ if rest_label_list[filter_index] == filter_value:
+ continue
+
+      # index the primary metric column; mean_diff and p2 are per-column numpy arrays.
+      if mean_diff[_PLOT_DATA_INDEX] > delta:
+        better_than_delta.append((mean_diff, p2, rest_label_list))
+
+        if p2[_PLOT_DATA_INDEX] <= pvalue_threshold:
+          better_than_delta_p95.append((mean_diff, rest_label_list))
+
+ if better_than_delta:
+ global_stats['better_than_delta'].append(better_than_delta)
+ if better_than_delta_p95:
+ global_stats['better_than_delta_p95'].append(better_than_delta_p95)
+
+ print("------------------------")
+ print("Global statistics:")
+ print("//// Rows with %s=%s are ignored here." %_IGNORE_PAIR)
+ print("- # of results with mean diff better than delta(%d) = %d" %(delta, len(global_stats['better_than_delta'])))
+ print(" > (meandiff, pvalue, labels)")
+ for i in global_stats['better_than_delta']:
+ print(" > %s" %i)
+ print("- # of results with mean diff better than delta(%d) CI%d%% = %d" %(delta, ci_threshold, len(global_stats['better_than_delta_p95'])))
+ print(" > (meandiff, labels)")
+ for i in global_stats['better_than_delta_p95']:
+ print(" > %s" %i)
+
+def main():
+ global _debug
+ global _DELTA
+ global _PVALUE_THRESHOLD
+
+ opts = parse_options()
+ _debug = opts.debug
+ _debug_print("parsed options: ", opts)
+
+  _PVALUE_THRESHOLD = opts.pvalue_threshold or _PVALUE_THRESHOLD
+  _DELTA = opts.delta_threshold or _DELTA
+
+ for file_name in opts.input_files:
+ with open(file_name, 'r') as input_file:
+ (grouped_numpy_iter, label_header, data_header) = from_file_group_by_labels(input_file)
+ print_analysis(grouped_numpy_iter, label_header, data_header, opts.output_samples)
+
+ with open(file_name, 'r') as input_file:
+ (grouped_numpy_iter, label_header, data_header) = from_file_group_by_labels(input_file)
+ without_baseline_iter = group_by_without_baseline_key(grouped_numpy_iter, label_header)
+ #_debug_print_gen(without_baseline_iter)
+
+ comparable_metrics_iter = iterate_comparable_metrics(without_baseline_iter, label_header)
+ print_comparable_analysis(comparable_metrics_iter, label_header, data_header, opts.output_comparable, opts.output_comparable_significant)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/startop/scripts/app_startup/app_startup_runner.py b/startop/scripts/app_startup/app_startup_runner.py
new file mode 100755
index 000000000000..780bb4eaeeef
--- /dev/null
+++ b/startop/scripts/app_startup/app_startup_runner.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+# Measure application start-up time by launching applications under various combinations.
+# See --help for more details.
+#
+#
+# Sample usage:
+# $> ./app_startup_runner.py -p com.google.android.calculator -r warm -r cold -lc 10 -o out.csv
+# $> ./analyze_metrics.py out.csv
+#
+#
+
+import argparse
+import csv
+import itertools
+import os
+import subprocess
+import sys
+import tempfile
+from typing import Any, Callable, Dict, Generic, Iterable, List, NamedTuple, TextIO, Tuple, TypeVar, Optional, Union
+
+# The following command line options participate in the combinatorial generation.
+# All other arguments have a global effect.
+_COMBINATORIAL_OPTIONS=['packages', 'readaheads', 'compiler_filters']
+_TRACING_READAHEADS=['mlock', 'fadvise']
+_FORWARD_OPTIONS={'loop_count': '--count'}
+_RUN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'run_app_with_prefetch')
+
+RunCommandArgs = NamedTuple('RunCommandArgs', [('package', str), ('readahead', str), ('compiler_filter', Optional[str])])
+CollectorPackageInfo = NamedTuple('CollectorPackageInfo', [('package', str), ('compiler_filter', str)])
+_COLLECTOR_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'collector')
+_COLLECTOR_TIMEOUT_MULTIPLIER = 2 # take the regular --timeout and multiply by 2; systrace starts up slowly.
+
+_UNLOCK_SCREEN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'unlock_screen')
+
+# This must be the only mutable global variable. All other global variables are constants to avoid magic literals.
+_debug = False # See -d/--debug flag.
+_DEBUG_FORCE = None # Ignore -d/--debug if this is not none.
+
+# Type hinting names.
+T = TypeVar('T')
+NamedTupleMeta = Callable[..., T] # approximation of a (S : NamedTuple<T> where S() == T) metatype.
+
+def parse_options(argv: List[str] = None):
+ """Parse command line arguments and return an argparse Namespace object."""
+ parser = argparse.ArgumentParser(description="Run one or more Android applications under various settings in order to measure startup time.")
+ # argparse considers args starting with - and -- optional in --help, even though required=True.
+ # by using a named argument group --help will clearly say that it's required instead of optional.
+ required_named = parser.add_argument_group('required named arguments')
+ required_named.add_argument('-p', '--package', action='append', dest='packages', help='package of the application', required=True)
+ required_named.add_argument('-r', '--readahead', action='append', dest='readaheads', help='which readahead mode to use', choices=('warm', 'cold', 'mlock', 'fadvise'), required=True)
+
+ # optional arguments
+ # use a group here to get the required arguments to appear 'above' the optional arguments in help.
+ optional_named = parser.add_argument_group('optional named arguments')
+ optional_named.add_argument('-c', '--compiler-filter', action='append', dest='compiler_filters', help='which compiler filter to use. if omitted it does not enforce the app\'s compiler filter', choices=('speed', 'speed-profile', 'quicken'))
+ optional_named.add_argument('-s', '--simulate', dest='simulate', action='store_true', help='Print which commands will run, but don\'t run the apps')
+ optional_named.add_argument('-d', '--debug', dest='debug', action='store_true', help='Add extra debugging output')
+ optional_named.add_argument('-o', '--output', dest='output', action='store', help='Write CSV output to file.')
+ optional_named.add_argument('-t', '--timeout', dest='timeout', action='store', type=int, help='Timeout after this many seconds when executing a single run.')
+ optional_named.add_argument('-lc', '--loop-count', dest='loop_count', default=1, type=int, action='store', help='How many times to loop a single run.')
+ optional_named.add_argument('-in', '--inodes', dest='inodes', type=str, action='store', help='Path to inodes file (system/extras/pagecache/pagecache.py -d inodes)')
+
+ return parser.parse_args(argv)
+
+# TODO: refactor this with a common library file with analyze_metrics.py
+def _debug_print(*args, **kwargs):
+ """Print the args to sys.stderr if the --debug/-d flag was passed in."""
+ if _debug:
+ print(*args, **kwargs, file=sys.stderr)
+
+def _expand_gen_repr(args):
+ """Like repr but any generator-like object has its iterator consumed
+ and then called repr on."""
+ new_args_list = []
+ for i in args:
+ # detect iterable objects that do not have their own override of __str__
+ if hasattr(i, '__iter__'):
+ to_str = getattr(i, '__str__')
+ if to_str.__objclass__ == object:
+ # the repr for a generator is just type+address, expand it out instead.
+ new_args_list.append([_expand_gen_repr([j])[0] for j in i])
+ continue
+ # normal case: uses the built-in to-string
+ new_args_list.append(i)
+ return new_args_list
+
+def _debug_print_gen(*args, **kwargs):
+ """Like _debug_print but will turn any iterable args into a list."""
+ if not _debug:
+ return
+
+ new_args_list = _expand_gen_repr(args)
+ _debug_print(*new_args_list, **kwargs)
+
+def _debug_print_nd(*args, **kwargs):
+ """Like _debug_print but will turn any NamedTuple-type args into a string."""
+ if not _debug:
+ return
+
+ new_args_list = []
+ for i in args:
+ if hasattr(i, '_field_types'):
+ new_args_list.append("%s: %s" %(i.__name__, i._field_types))
+ else:
+ new_args_list.append(i)
+
+ _debug_print(*new_args_list, **kwargs)
+
+def dict_lookup_any_key(dictionary: dict, *keys: List[Any]):
+ for k in keys:
+ if k in dictionary:
+ return dictionary[k]
+ raise KeyError("None of the keys %s were in the dictionary" %(keys))
+
+def generate_run_combinations(named_tuple: NamedTupleMeta[T], opts_dict: Dict[str, List[Optional[str]]])\
+ -> Iterable[T]:
+ """
+ Create all possible combinations given the values in opts_dict[named_tuple._fields].
+
+ :type T: type annotation for the named_tuple type.
+ :param named_tuple: named tuple type, whose fields are used to make combinations for
+ :param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields.
+ :return: an iterable over named_tuple instances.
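+
+  Example (hypothetical): for a named tuple with fields ('a', 'b') and
+  opts_dict {'as': ['a1'], 'bs': ['b1', 'b2']}, this yields
+  NT(a='a1', b='b1') and NT(a='a1', b='b2').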
+ """
+ combinations_list = []
+ for k in named_tuple._fields:
+    # the key can be either singular or plural, e.g. 'package' or 'packages'
+ val = dict_lookup_any_key(opts_dict, k, k + "s")
+
+ # treat {'x': None} key value pairs as if it was [None]
+ # otherwise itertools.product throws an exception about not being able to iterate None.
+ combinations_list.append(val or [None])
+
+ _debug_print("opts_dict: ", opts_dict)
+ _debug_print_nd("named_tuple: ", named_tuple)
+ _debug_print("combinations_list: ", combinations_list)
+
+ for combo in itertools.product(*combinations_list):
+ yield named_tuple(*combo)
+
+def key_to_cmdline_flag(key: str) -> str:
+ """Convert key into a command line flag, e.g. 'foo-bars' -> '--foo-bar' """
+ if key.endswith("s"):
+ key = key[:-1]
+ return "--" + key.replace("_", "-")
+
+def as_run_command(tpl: NamedTuple) -> List[Union[str, Any]]:
+ """
+ Convert a named tuple into a command-line compatible arguments list.
+
+ Example: ABC(1, 2, 3) -> ['--a', 1, '--b', 2, '--c', 3]
+ """
+ args = []
+ for key, value in tpl._asdict().items():
+ if value is None:
+ continue
+ args.append(key_to_cmdline_flag(key))
+ args.append(value)
+ return args
+
+def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], dst_nt: NamedTupleMeta[T])\
+ -> Iterable[Tuple[T, Iterable[NamedTuple]]]:
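+  """
+  Group already-generated run combinations by the fields of dst_nt.
+
+  For example (as used in main), consecutive RunCommandArgs tuples that share
+  the same package and compiler_filter collapse into a single
+  (CollectorPackageInfo, iterator-of-RunCommandArgs) pair.
+  """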
+
+ def group_by_keys(src_nt):
+ src_d = src_nt._asdict()
+ # now remove the keys that aren't legal in dst.
+ for illegal_key in set(src_d.keys()) - set(dst_nt._fields):
+ if illegal_key in src_d:
+ del src_d[illegal_key]
+
+ return dst_nt(**src_d)
+
+ for args_list_it in itertools.groupby(run_combinations, group_by_keys):
+ (group_key_value, args_it) = args_list_it
+ yield (group_key_value, args_it)
+
+def parse_run_script_csv_file(csv_file: TextIO) -> List[int]:
+ """Parse a CSV file full of integers into a flat int list."""
+ csv_reader = csv.reader(csv_file)
+ arr = []
+ for row in csv_reader:
+ for i in row:
+ if i:
+ arr.append(int(i))
+ return arr
+
+def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)\
+ -> Tuple[str, TextIO]:
+ """
+ Create a command to run a script given the args.
+ Appends --count <loop_count> --output <tmp-file-name>.
+ Returns a tuple (cmd, tmp_file)
+ """
+ tmp_output_file = tempfile.NamedTemporaryFile(mode='r')
+ cmd = [script] + args
+ for key, value in kwargs.items():
+ cmd += ['--%s' %(key), "%s" %(value)]
+ if _debug:
+ cmd += ['--verbose']
+ cmd = cmd + ["--output", tmp_output_file.name]
+ return cmd, tmp_output_file
+
+def execute_arbitrary_command(cmd: List[str], simulate: bool, timeout: int) -> Tuple[bool, str]:
+ if simulate:
+ print(" ".join(cmd))
+ return (True, "")
+ else:
+ _debug_print("[EXECUTE]", cmd)
+ proc = subprocess.Popen(cmd,
+ stderr=subprocess.STDOUT,
+ stdout=subprocess.PIPE,
+ universal_newlines=True)
+ try:
+ script_output = proc.communicate(timeout=timeout)[0]
+ except subprocess.TimeoutExpired:
+ print("[TIMEDOUT]")
+ proc.kill()
+ script_output = proc.communicate()[0]
+
+ _debug_print("[STDOUT]", script_output)
+ return_code = proc.wait()
+ passed = (return_code == 0)
+ _debug_print("[$?]", return_code)
+ if not passed:
+ print("[FAILED, code:%s]" %(return_code), script_output, file=sys.stderr)
+
+ return (passed, script_output)
+
+def execute_run_combos(grouped_run_combos: Iterable[Tuple[CollectorPackageInfo, Iterable[RunCommandArgs]]], simulate: bool, inodes_path: str, timeout: int, loop_count: int, need_trace: bool):
+ # nothing will work if the screen isn't unlocked first.
+ execute_arbitrary_command([_UNLOCK_SCREEN_SCRIPT], simulate, timeout)
+
+ for collector_info, run_combos in grouped_run_combos:
+ #collector_args = ["--package", package_name]
+ collector_args = as_run_command(collector_info)
+ # TODO: forward --wait_time for how long systrace runs?
+ # TODO: forward --trace_buffer_size for size of systrace buffer size?
+ collector_cmd, collector_tmp_output_file = make_script_command_with_temp_output(_COLLECTOR_SCRIPT, collector_args, inodes=inodes_path)
+
+ with collector_tmp_output_file:
+ collector_passed = True
+ if need_trace:
+ collector_timeout = timeout and _COLLECTOR_TIMEOUT_MULTIPLIER * timeout
+ (collector_passed, collector_script_output) = execute_arbitrary_command(collector_cmd, simulate, collector_timeout)
+        # TODO: consider printing a "; collector wrote file to <...>" comment into the CSV file so we know it ran.
+
+ for combos in run_combos:
+ args = as_run_command(combos)
+
+ cmd, tmp_output_file = make_script_command_with_temp_output(_RUN_SCRIPT, args, count=loop_count, input=collector_tmp_output_file.name)
+ with tmp_output_file:
+ (passed, script_output) = execute_arbitrary_command(cmd, simulate, timeout)
+          parsed_output = [1, 2, 3] if simulate else parse_run_script_csv_file(tmp_output_file)
+ yield (passed, script_output, parsed_output)
+
+def gather_results(commands: Iterable[Tuple[bool, str, List[int]]], key_list: List[str], value_list: List[Tuple[str, ...]]):
+ _debug_print("gather_results: key_list = ", key_list)
+ yield key_list + ["time(ms)"]
+
+  stringify_none = lambda s: "<none>" if s is None else s
+
+ for ((passed, script_output, run_result_list), values) in itertools.zip_longest(commands, value_list):
+ if not passed:
+ continue
+ for result in run_result_list:
+ yield [stringify_none(i) for i in values] + [result]
+
+ yield ["; avg(%s), min(%s), max(%s), count(%s)" %(sum(run_result_list, 0.0) / len(run_result_list), min(run_result_list), max(run_result_list), len(run_result_list)) ]
+
+def eval_and_save_to_csv(output, annotated_result_values):
+ csv_writer = csv.writer(output)
+ for row in annotated_result_values:
+ csv_writer.writerow(row)
+ output.flush() # see the output live.
+
+def main():
+ global _debug
+
+ opts = parse_options()
+ _debug = opts.debug
+ if _DEBUG_FORCE is not None:
+ _debug = _DEBUG_FORCE
+ _debug_print("parsed options: ", opts)
+  need_trace = bool(set(opts.readaheads) & set(_TRACING_READAHEADS))
+ if need_trace and not opts.inodes:
+ print("Error: Missing -in/--inodes, required when using a readahead of %s" %(_TRACING_READAHEADS), file=sys.stderr)
+ return 1
+
+  output_file = open(opts.output, 'w') if opts.output else sys.stdout
+
+ combos = lambda: generate_run_combinations(RunCommandArgs, vars(opts))
+ _debug_print_gen("run combinations: ", combos())
+
+ grouped_combos = lambda: generate_group_run_combinations(combos(), CollectorPackageInfo)
+ _debug_print_gen("grouped run combinations: ", grouped_combos())
+
+  exec_it = execute_run_combos(grouped_combos(), opts.simulate, opts.inodes, opts.timeout, opts.loop_count, need_trace)
+  results = gather_results(exec_it, _COMBINATORIAL_OPTIONS, combos())
+ eval_and_save_to_csv(output_file, results)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/startop/scripts/app_startup/app_startup_runner_test.py b/startop/scripts/app_startup/app_startup_runner_test.py
new file mode 100755
index 000000000000..f96f802a3aef
--- /dev/null
+++ b/startop/scripts/app_startup/app_startup_runner_test.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Unit tests for the app_startup_runner.py script.
+
+Install:
+ $> sudo apt-get install python3-pytest ## OR
+ $> pip install -U pytest
+See also https://docs.pytest.org/en/latest/getting-started.html
+
+Usage:
+ $> ./app_startup_runner_test.py
+ $> pytest app_startup_runner_test.py
+ $> python -m pytest app_startup_runner_test.py
+
+See also https://docs.pytest.org/en/latest/usage.html
+"""
+
+# global imports
+from contextlib import contextmanager
+import io
+import shlex
+import sys
+import typing
+
+# pip imports
+import pytest
+
+# local imports
+import app_startup_runner as asr
+
+#
+# Argument Parsing Helpers
+#
+
+@contextmanager
+def ignore_stdout_stderr():
+ """Ignore stdout/stderr output for duration of this context."""
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+
+@contextmanager
+def argparse_bad_argument(msg):
+ """
+ Assert that a SystemExit is raised when executing this context.
+ If the assertion fails, print the message 'msg'.
+ """
+ with pytest.raises(SystemExit, message=msg):
+ with ignore_stdout_stderr():
+ yield
+
+def assert_bad_argument(args, msg):
+ """
+ Assert that the command line arguments in 'args' are malformed.
+ Prints 'msg' if the assertion fails.
+ """
+ with argparse_bad_argument(msg):
+ parse_args(args)
+
+def parse_args(args):
+ """
+ :param args: command-line like arguments as a single string
+ :return: dictionary of parsed key/values
+ """
+ # "-a b -c d" => ['-a', 'b', '-c', 'd']
+ return vars(asr.parse_options(shlex.split(args)))
+
+def default_dict_for_parsed_args(**kwargs):
+ """
+  Combine the given kwargs with all of the "optional" parameters' default values.
+ """
+ d = {'compiler_filters': None, 'simulate': False, 'debug': False, 'output': None, 'timeout': None, 'loop_count': 1, 'inodes': None}
+ d.update(kwargs)
+ return d
+
+def default_mock_dict_for_parsed_args(include_optional=True, **kwargs):
+ """
+ Combine default dict with all optional parameters with some mock required parameters.
+ """
+ d = {'packages': ['com.fake.package'], 'readaheads': ['warm']}
+ if include_optional:
+ d.update(default_dict_for_parsed_args())
+ d.update(kwargs)
+ return d
+
+def parse_optional_args(arg_str):
+ """
+ Parse an argument string which already includes all the required arguments
+ in default_mock_dict_for_parsed_args.
+ """
+ req = "--package com.fake.package --readahead warm"
+  return parse_args("%s %s" %(req, arg_str))
+
+def test_argparse():
+ # missing arguments
+ assert_bad_argument("", "-p and -r are required")
+ assert_bad_argument("-r warm", "-p is required")
+ assert_bad_argument("--readahead warm", "-p is required")
+ assert_bad_argument("-p com.fake.package", "-r is required")
+ assert_bad_argument("--package com.fake.package", "-r is required")
+
+ # required arguments are parsed correctly
+ ad = default_dict_for_parsed_args # assert dict
+
+ assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'], readaheads=['warm'])
+ assert parse_args("-p xyz -r warm") == ad(packages=['xyz'], readaheads=['warm'])
+
+ assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
+ assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
+
+ # optional arguments are parsed correctly.
+ mad = default_mock_dict_for_parsed_args # mock assert dict
+ assert parse_optional_args("--output filename.csv") == mad(output='filename.csv')
+ assert parse_optional_args("-o filename.csv") == mad(output='filename.csv')
+
+ assert parse_optional_args("--timeout 123") == mad(timeout=123)
+ assert parse_optional_args("-t 456") == mad(timeout=456)
+
+ assert parse_optional_args("--loop-count 123") == mad(loop_count=123)
+ assert parse_optional_args("-lc 456") == mad(loop_count=456)
+
+ assert parse_optional_args("--inodes bar") == mad(inodes="bar")
+ assert parse_optional_args("-in baz") == mad(inodes="baz")
+
+
+def generate_run_combinations(*args):
+ # expand out the generator values so that assert x == y works properly.
+ return [i for i in asr.generate_run_combinations(*args)]
+
+def test_generate_run_combinations():
+ blank_nd = typing.NamedTuple('Blank')
+ assert generate_run_combinations(blank_nd, {}) == [()], "empty"
+ assert generate_run_combinations(blank_nd, {'a' : ['a1', 'a2']}) == [()], "empty filter"
+ a_nd = typing.NamedTuple('A', [('a', str)])
+ assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None"
+ assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), ('a2',)], "one item"
+ assert generate_run_combinations(a_nd,
+ {'a' : ['a1', 'a2'], 'b': ['b1', 'b2']}) == [('a1',), ('a2',)],\
+ "one item filter"
+ ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)])
+ assert generate_run_combinations(ab_nd,
+ {'a': ['a1', 'a2'],
+ 'b': ['b1', 'b2']}) == [ab_nd('a1', 'b1'),
+ ab_nd('a1', 'b2'),
+ ab_nd('a2', 'b1'),
+ ab_nd('a2', 'b2')],\
+ "two items"
+
+ assert generate_run_combinations(ab_nd,
+ {'as': ['a1', 'a2'],
+ 'bs': ['b1', 'b2']}) == [ab_nd('a1', 'b1'),
+ ab_nd('a1', 'b2'),
+ ab_nd('a2', 'b1'),
+ ab_nd('a2', 'b2')],\
+ "two items plural"
+
+def test_key_to_cmdline_flag():
+ assert asr.key_to_cmdline_flag("abc") == "--abc"
+ assert asr.key_to_cmdline_flag("foos") == "--foo"
+ assert asr.key_to_cmdline_flag("ba_r") == "--ba-r"
+ assert asr.key_to_cmdline_flag("ba_zs") == "--ba-z"
+
+
+def test_make_script_command_with_temp_output():
+ cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script", args=[], count=1)
+ with tmp_file:
+ assert cmd_str == ["fake_script", "--count", "1", "--output", tmp_file.name]
+
+ cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script", args=['a', 'b'], count=2)
+ with tmp_file:
+ assert cmd_str == ["fake_script", "a", "b", "--count", "2", "--output", tmp_file.name]
+
+def test_parse_run_script_csv_file():
+ # empty file -> empty list
+ f = io.StringIO("")
+ assert asr.parse_run_script_csv_file(f) == []
+
+ # common case
+ f = io.StringIO("1,2,3")
+ assert asr.parse_run_script_csv_file(f) == [1,2,3]
+
+ # ignore trailing comma
+ f = io.StringIO("1,2,3,4,5,")
+ assert asr.parse_run_script_csv_file(f) == [1,2,3,4,5]
+
+
+if __name__ == '__main__':
+ pytest.main()
diff --git a/startop/scripts/app_startup/launch_application b/startop/scripts/app_startup/launch_application
new file mode 100755
index 000000000000..bc4ec51d6d08
--- /dev/null
+++ b/startop/scripts/app_startup/launch_application
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/lib/common"
+
+launch_application() {
+ local package="$1"
+ local activity="$2"
+  verbose_print adb shell am start -S -W "$package"/"$activity"
+  # Declare and assign separately so that $? below reflects the exit code of
+  # 'am start' rather than the exit code of 'local'.
+  local am_output
+  am_output="$(adb shell am start -S -W "$package"/"$activity")"
+  if [[ $? -ne 0 ]]; then
+    echo "am start failed" >&2
+    return 1
+  fi
+
+ # for everything else use the am start "TotalTime" output.
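+  # ('am start -W' prints a line like "TotalTime: 357", in milliseconds.)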
+ verbose_print "$am_output"
+ local total_time="$(echo "$am_output" | grep 'TotalTime:' | sed 's/TotalTime: //g')"
+ verbose_print "total time: $total_time"
+
+ # TODO: Extract alternative metrics such as the #reportFullyDrawn.
+
+ echo "$total_time"
+}
+
+launch_application "$@"
diff --git a/startop/scripts/app_startup/lib/common b/startop/scripts/app_startup/lib/common
new file mode 100755
index 000000000000..4d5a53e4bb0c
--- /dev/null
+++ b/startop/scripts/app_startup/lib/common
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+ echo "Please run source build/envsetup.sh first" >&2
+ exit 1
+fi
+
+source "$ANDROID_BUILD_TOP/build/envsetup.sh"
+
+verbose_print() {
+ if [[ "$verbose" == "y" ]]; then
+ echo "$@" >&2
+ fi
+}
diff --git a/startop/scripts/app_startup/run_app_with_prefetch b/startop/scripts/app_startup/run_app_with_prefetch
new file mode 100755
index 000000000000..1ff5fc64116f
--- /dev/null
+++ b/startop/scripts/app_startup/run_app_with_prefetch
@@ -0,0 +1,344 @@
+#!/bin/bash
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage() {
+ cat <<EOF
+Usage: run_app_with_prefetch --package <name> [OPTIONS]...
+
+ -p, --package <name> package of the app to test
+ -a, --activity <name> activity to use
+ -h, --help usage information (this)
+ -v, --verbose enable extra verbose printing
+ -i, --input <file> trace file protobuf (default 'TraceFile.pb')
+ -r, --readahead <mode> cold, warm, fadvise, mlock (default 'warm')
+ -w, --when <when> aot or jit (default 'aot')
+ -c, --count <count> how many times to run (default 1)
+ -s, --sleep <sec> how long to sleep after readahead
+ -t, --timeout <sec> how many seconds to timeout in between each app run (default 10)
+ -o, --output <file.csv> what file to write the performance results into as csv (default stdout)
+EOF
+}
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/lib/common"
+
+needs_trace_file="n"
+input_file=""
+package=""
+mode='warm'
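+# Note: 'count' holds one more iteration than the number of reported timings;
+# the first result is treated as a warm-up and dropped at the end of the script.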
+count=2
+sleep_time=2
+timeout=10
+output="" # stdout by default
+when="aot"
+parse_arguments() {
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ -p|--package)
+ package="$2"
+ shift
+ ;;
+ -a|--activity)
+ activity="$2"
+ shift
+ ;;
+ -i|--input)
+ input_file="$2"
+ shift
+ ;;
+ -v|--verbose)
+ export verbose="y"
+ ;;
+ -r|--readahead)
+ mode="$2"
+ shift
+ ;;
+ -c|--count)
+ count="$2"
+ ((count+=1))
+ shift
+ ;;
+ -s|--sleep)
+ sleep_time="$2"
+ shift
+ ;;
+ -t|--timeout)
+ timeout="$2"
+ shift
+ ;;
+ -o|--output)
+ output="$2"
+ shift
+ ;;
+ -w|--when)
+ when="$2"
+ shift
+ ;;
+ --compiler-filter)
+ # ignore any '--compiler-filter xyz' settings.
+ # FIXME: app_startup_runner.py should not be passing this flag.
+ shift
+ ;;
+ *)
+ echo "Invalid argument: $1" >&2
+ exit 1
+ esac
+ shift
+ done
+}
+
+echo_to_output_file() {
+ if [[ "x$output" != x ]]; then
+ echo "$@" >> $output
+ fi
+ # Always echo to stdout as well.
+ echo "$@"
+}
+
+get_activity_name() {
+ local package="$1"
+ local action_key="android.intent.action.MAIN:"
+
+ local activity_line="$(adb shell cmd package query-activities --brief -a android.intent.action.MAIN -c android.intent.category.LAUNCHER | grep "$package")"
+ #echo $activity_line
+ IFS="/" read -a array <<< "$activity_line"
+ local activity_name="${array[1]}"
+ echo "$activity_name"
+ #adb shell am start "$package/$activity_name"
+}
+
+find_package_path() {
+ local pkg="$1"
+
+ res="$(adb shell find "/data/app/$pkg"-'*' -maxdepth 0 2> /dev/null)"
+ if [[ -z $res ]]; then
+ res="$(adb shell find "/system/app/$pkg"-'*' -maxdepth 0 2> /dev/null)"
+ fi
+ echo "$res"
+}
+
+remote_pkill() {
+ local what="$1"
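+  # 'pid' below is the host-side helper from build/envsetup.sh (sourced via
+  # lib/common); it expands locally, while the escaped \$i runs on the device.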
+ adb shell "for i in $(pid $what); do kill \$i; done"
+}
+
+# Main entry point
+if [[ $# -eq 0 ]]; then
+ usage
+ exit 1
+else
+ parse_arguments "$@"
+
+  # if we do not have a package, exit early with an error
+ [[ "$package" == "" ]] && echo "--package not specified" 1>&2 && exit 1
+
+ if [[ $mode != "cold" && $mode != "warm" ]]; then
+ needs_trace_file="y"
+ if [[ -z "$input_file" ]] || ! [[ -f $input_file ]]; then
+ echo "--input not specified" 1>&2
+ exit 1
+ fi
+ fi
+
+ if [[ "$activity" == "" ]]; then
+ activity="$(get_activity_name "$package")"
+ if [[ "$activity" == "" ]]; then
+ echo "Activity name could not be found, invalid package name?" 1>&2
+ exit 1
+ else
+ verbose_print "Activity name inferred: " "$activity"
+ fi
+ fi
+fi
+
+adb root > /dev/null
+
+if [[ ($when == jit) || ($when == aot) ]] && [[ "$(adb shell getenforce)" != "Permissive" ]]; then
+ echo "Disable selinux permissions and restart framework."
+ adb shell setenforce 0
+ adb shell stop
+ adb shell start
+ adb wait-for-device
+fi
+
+# TODO: set performance governor etc, preferably only once
+# before every single app run.
+
+# Kill everything before running.
+remote_pkill "$package"
+sleep 1
+
+timings_array=()
+
+package_path="$(find_package_path "$package")"
+# find_package_path always exits 0 (it ends in echo), so test for empty output.
+if [[ -z "$package_path" ]]; then
+ echo "Failed to detect package path for '$package'" >&2
+ exit 1
+fi
+verbose_print "Package was in path '$package_path'"
+
+
+keep_application_trace_file=n
+application_trace_file_path="$package_path/TraceFile.pb"
+trace_file_directory="$package_path"
+if [[ $needs_trace_file == y ]]; then
+ # system server always passes down the package path in a hardcoded spot.
+ if [[ $when == "jit" ]]; then
+ verbose_print adb push "$input_file" "$application_trace_file_path"
+ adb push "$input_file" "$application_trace_file_path"
+ keep_application_trace_file="y"
+ else
+ # otherwise use a temporary directory to get normal non-jit behavior.
+ trace_file_directory="/data/local/tmp/prefetch/$package"
+ adb shell mkdir -p "$trace_file_directory"
+ verbose_print adb push "$input_file" "$trace_file_directory/TraceFile.pb"
+ adb push "$input_file" "$trace_file_directory/TraceFile.pb"
+ fi
+fi
+
+# Everything other than JIT: remove the trace file,
+# otherwise system server activity hints will kick in
+# and the new just-in-time app pre-warmup will happen.
+if [[ $keep_application_trace_file == "n" ]]; then
+ adb shell "[[ -f '$application_trace_file_path' ]] && rm '$application_trace_file_path'"
+fi
+
+# Perform AOT readahead/pinning/etc when an application is about to be launched.
+# For JIT readahead, we allow the system to handle it itself (this is a no-op).
+#
+# For warm, cold, etc modes which don't need readahead this is always a no-op.
+perform_aot() {
+ local the_when="$1" # user: aot, jit
+ local the_mode="$2" # warm, cold, fadvise, mlock, etc.
+
+ if [[ $the_when != "aot" ]]; then
+ # TODO: just in time implementation.. should probably use system server.
+ return 0
+ fi
+
+ # any non-warm/non-cold modes should use the iorap-activity-hint wrapper script.
+ if [[ $the_mode != 'warm' && $the_mode != 'cold' ]]; then
+
+ # TODO: add activity_hint_sender.exp
+ verbose_print "starting with package=$package package_path=$trace_file_directory"
+ coproc hint_sender_fd { $ANDROID_BUILD_TOP/system/iorap/src/sh/activity_hint_sender.exp "$package" "$trace_file_directory" "$the_mode"; }
+ hint_sender_pid=$!
+ verbose_print "Activity hint sender began"
+
+ notification_success="n"
+ while read -r -u "${hint_sender_fd[0]}" hint_sender_output; do
+ verbose_print "$hint_sender_output"
+ if [[ "$hint_sender_output" == "Press any key to send completed event..."* ]]; then
+ verbose_print "WE DID SEE NOTIFICATION SUCCESS."
+ notification_success='y'
+ # Give it some time to actually perform the readaheads.
+ sleep $sleep_time
+ break
+ fi
+ done
+
+ if [[ $notification_success == 'n' ]]; then
+ echo "[FATAL] Activity hint notification failed." 1>&2
+ exit 1
+ fi
+ fi
+}
+
+perform_aot_cleanup() {
+ local the_when="$1" # user: aot, jit
+ local the_mode="$2" # warm, cold, fadvise, mlock, etc.
+
+ if [[ $the_when != "aot" ]]; then
+ # TODO: just in time implementation.. should probably use system server.
+ return 0
+ fi
+
+ # any non-warm/non-cold modes should use the iorap-activity-hint wrapper script.
+ if [[ $the_mode != 'warm' && $the_mode != 'cold' ]]; then
+ # Clean up the hint sender by telling it that the launch was completed,
+ # and to shutdown the watcher.
+ echo "Done\n" >&"${hint_sender_fd[1]}"
+
+ while read -r -u "${hint_sender_fd[0]}" hint_sender_output; do
+ verbose_print "$hint_sender_output"
+ done
+
+ wait $hint_sender_pid
+ fi
+}
+
+# TODO: This loop logic could probably be moved into app_startup_runner.py
+for ((i=0; i<count; ++i)); do
+ verbose_print "=========================================="
+ verbose_print "==== ITERATION $i ===="
+ verbose_print "=========================================="
+ if [[ $mode != "warm" ]]; then
+ verbose_print "Drop caches for non-warm start."
+ # Drop all caches to get cold starts.
+ adb shell "echo 3 > /proc/sys/vm/drop_caches"
+ fi
+
+ perform_aot "$when" "$mode"
+
+ verbose_print "Running with timeout $timeout"
+
+ # TODO: multiple metrics output.
+ total_time="$(timeout $timeout $DIR/launch_application "$package" "$activity")"
+
+ if [[ $? -ne 0 ]]; then
+ echo "WARNING: Skip bad result, try iteration again." >&2
+ ((i=i-1))
+ continue
+ fi
+
+ perform_aot_cleanup "$when" "$mode"
+
+ echo "Iteration $i. Total time was: $total_time"
+
+ timings_array+=($total_time)
+done
+
+# drop the first result which is usually garbage.
+timings_array=("${timings_array[@]:1}")
+
+
+# Print out interactive/debugging timings and averages.
+# Other scripts should use the --output flag and parse the CSV.
+for tim in "${timings_array[@]}"; do
+ echo_to_output_file -ne "$tim,"
+done
+echo_to_output_file ""
+
+average_string=$(echo "${timings_array[@]}" | awk '{s+=$0}END{print "Average:",s/NR}' RS=" ")
+echo -ne "${average_string}."
+if [[ x$output != x ]]; then
+ echo " Saved results to '$output'"
+fi
+
+# Temporary hack around multiple activities being launched with different package paths (for same app):
+# Clean up all left-over TraceFile.pb
+adb shell 'for i in $(find /data/app -name TraceFile.pb); do rm "$i"; done'
+
+# Kill the process to ensure AM isn't keeping it around.
+remote_pkill "$package"
+
+exit 0
diff --git a/startop/scripts/app_startup/unlock_screen b/startop/scripts/app_startup/unlock_screen
new file mode 100755
index 000000000000..478294c9f35d
--- /dev/null
+++ b/startop/scripts/app_startup/unlock_screen
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This turns the screen on if it's off.
+# If it's already on, this does nothing unless we're on the home screen, in
+# which case it opens up some background menu.
+#
+# However, this menu is ignored because "am start" commands still work as expected.
+adb shell input keyevent MENU