summaryrefslogtreecommitdiff
path: root/startop/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'startop/scripts')
-rwxr-xr-xstartop/scripts/app_startup/app_startup_runner.py372
-rwxr-xr-xstartop/scripts/app_startup/app_startup_runner_test.py129
-rwxr-xr-xstartop/scripts/app_startup/launch_application35
-rwxr-xr-xstartop/scripts/app_startup/lib/common145
-rwxr-xr-xstartop/scripts/app_startup/metrics/com.google.android.GoogleCamera25
-rwxr-xr-xstartop/scripts/app_startup/parse_metrics181
-rwxr-xr-xstartop/scripts/app_startup/run_app_with_prefetch124
-rwxr-xr-xstartop/scripts/iorap/collector402
-rwxr-xr-xstartop/scripts/iorap/common228
-rwxr-xr-xstartop/scripts/iorap/compile_handcrafted_file.py297
-rwxr-xr-xstartop/scripts/iorap/dump_compiled_pb38
-rwxr-xr-xstartop/scripts/iorap/dump_trace_pb38
-rwxr-xr-xstartop/scripts/iorap/pull_textcache24
-rw-r--r--startop/scripts/trace_analyzer/queries_all.sql57
-rw-r--r--startop/scripts/trace_analyzer/queries_app_launch_spans_with_name.sql44
-rw-r--r--startop/scripts/trace_analyzer/queries_block_launch.sql52
-rw-r--r--startop/scripts/trace_analyzer/queries_find_sched_switch_unblocked.sql101
-rw-r--r--startop/scripts/trace_analyzer/queries_get_comm_and_pids.sql42
-rw-r--r--startop/scripts/trace_analyzer/queries_get_procs.sql30
-rw-r--r--startop/scripts/trace_analyzer/queries_get_ui_threads.sql88
-rw-r--r--startop/scripts/trace_analyzer/queries_mark_write_join.sql53
-rw-r--r--startop/scripts/trace_analyzer/queries_pretty_print_block_launch.sql28
-rwxr-xr-xstartop/scripts/trace_analyzer/run-sql-queries79
-rwxr-xr-xstartop/scripts/trace_analyzer/trace_analyzer42
-rwxr-xr-xstartop/scripts/trace_analyzer/trace_analyzer.py350
-rwxr-xr-xstartop/scripts/trace_analyzer/trace_analyzer_recursive78
26 files changed, 3020 insertions, 62 deletions
diff --git a/startop/scripts/app_startup/app_startup_runner.py b/startop/scripts/app_startup/app_startup_runner.py
index 780bb4eaeeef..9a608af346d0 100755
--- a/startop/scripts/app_startup/app_startup_runner.py
+++ b/startop/scripts/app_startup/app_startup_runner.py
@@ -27,12 +27,13 @@
#
import argparse
+import asyncio
import csv
import itertools
import os
-import subprocess
import sys
import tempfile
+import time
from typing import Any, Callable, Dict, Generic, Iterable, List, NamedTuple, TextIO, Tuple, TypeVar, Optional, Union
# The following command line options participate in the combinatorial generation.
@@ -44,7 +45,7 @@ _RUN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'run_app_w
RunCommandArgs = NamedTuple('RunCommandArgs', [('package', str), ('readahead', str), ('compiler_filter', Optional[str])])
CollectorPackageInfo = NamedTuple('CollectorPackageInfo', [('package', str), ('compiler_filter', str)])
-_COLLECTOR_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'collector')
+_COLLECTOR_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../iorap/collector')
_COLLECTOR_TIMEOUT_MULTIPLIER = 2 # take the regular --timeout and multiply by 2; systrace starts up slowly.
_UNLOCK_SCREEN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'unlock_screen')
@@ -191,7 +192,206 @@ def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], dst_
(group_key_value, args_it) = args_list_it
yield (group_key_value, args_it)
-def parse_run_script_csv_file(csv_file: TextIO) -> List[int]:
+class DataFrame:
+ """Table-like class for storing a 2D cells table with named columns."""
+ def __init__(self, data: Dict[str, List[object]] = {}):
+ """
+ Create a new DataFrame from a dictionary (keys = headers,
+ values = columns).
+ """
+ self._headers = [i for i in data.keys()]
+ self._rows = []
+
+ row_num = 0
+
+ def get_data_row(idx):
+ r = {}
+ for header, header_data in data.items():
+
+ if not len(header_data) > idx:
+ continue
+
+ r[header] = header_data[idx]
+
+ return r
+
+ while True:
+ row_dict = get_data_row(row_num)
+ if len(row_dict) == 0:
+ break
+
+ self._append_row(row_dict.keys(), row_dict.values())
+ row_num = row_num + 1
+
+ def concat_rows(self, other: 'DataFrame') -> None:
+ """
+ In-place concatenate rows of other into the rows of the
+ current DataFrame.
+
+ None is added in pre-existing cells if new headers
+ are introduced.
+ """
+ other_datas = other._data_only()
+
+ other_headers = other.headers
+
+ for d in other_datas:
+ self._append_row(other_headers, d)
+
+ def _append_row(self, headers: List[str], data: List[object]):
+ new_row = {k:v for k,v in zip(headers, data)}
+ self._rows.append(new_row)
+
+ for header in headers:
+ if not header in self._headers:
+ self._headers.append(header)
+
+ def __repr__(self):
+# return repr(self._rows)
+ repr = ""
+
+ header_list = self._headers_only()
+
+ row_format = u""
+ for header in header_list:
+ row_format = row_format + u"{:>%d}" %(len(header) + 1)
+
+ repr = row_format.format(*header_list) + "\n"
+
+ for v in self._data_only():
+ repr = repr + row_format.format(*v) + "\n"
+
+ return repr
+
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ return self.headers == other.headers and self.data_table == other.data_table
+ else:
+ print("wrong instance", other.__class__)
+ return False
+
+ @property
+ def headers(self) -> List[str]:
+ return [i for i in self._headers_only()]
+
+ @property
+ def data_table(self) -> List[List[object]]:
+ return list(self._data_only())
+
+ @property
+ def data_table_transposed(self) -> List[List[object]]:
+ return list(self._transposed_data())
+
+ @property
+ def data_row_len(self) -> int:
+ return len(self._rows)
+
+ def data_row_at(self, idx) -> List[object]:
+ """
+ Return a single data row at the specified index (0th based).
+
+ Accepts negative indices, e.g. -1 is last row.
+ """
+ row_dict = self._rows[idx]
+ l = []
+
+ for h in self._headers_only():
+ l.append(row_dict.get(h)) # Adds None in blank spots.
+
+ return l
+
+ def copy(self) -> 'DataFrame':
+ """
+ Shallow copy of this DataFrame.
+ """
+ return self.repeat(count=0)
+
+ def repeat(self, count: int) -> 'DataFrame':
+ """
+ Returns a new DataFrame where each row of this dataframe is repeated count times.
+ A repeat of a row is adjacent to other repeats of that same row.
+ """
+ df = DataFrame()
+ df._headers = self._headers.copy()
+
+ rows = []
+ for row in self._rows:
+ for i in range(count):
+ rows.append(row.copy())
+
+ df._rows = rows
+
+ return df
+
+ def merge_data_columns(self, other: 'DataFrame'):
+ """
+ Merge self and another DataFrame by adding the data from other column-wise.
+ For any headers that are the same, data from 'other' is preferred.
+ """
+ for h in other._headers:
+ if not h in self._headers:
+ self._headers.append(h)
+
+ append_rows = []
+
+ for self_dict, other_dict in itertools.zip_longest(self._rows, other._rows):
+ if not self_dict:
+ d = {}
+ append_rows.append(d)
+ else:
+ d = self_dict
+
+ d_other = other_dict
+ if d_other:
+ for k,v in d_other.items():
+ d[k] = v
+
+ for r in append_rows:
+ self._rows.append(r)
+
+ def data_row_reduce(self, fnc) -> 'DataFrame':
+ """
+ Reduces the data row-wise by applying the fnc to each row (column-wise).
+ Empty cells are skipped.
+
+ fnc(Iterable[object]) -> object
+ fnc is applied over every non-empty cell in that column (descending row-wise).
+
+ Example:
+ DataFrame({'a':[1,2,3]}).data_row_reduce(sum) == DataFrame({'a':[6]})
+
+ Returns a new single-row DataFrame.
+ """
+ df = DataFrame()
+ df._headers = self._headers.copy()
+
+ def yield_by_column(header_key):
+ for row_dict in self._rows:
+ val = row_dict.get(header_key)
+ if val:
+ yield val
+
+ new_row_dict = {}
+ for h in df._headers:
+ cell_value = fnc(yield_by_column(h))
+ new_row_dict[h] = cell_value
+
+ df._rows = [new_row_dict]
+ return df
+
+ def _headers_only(self):
+ return self._headers
+
+ def _data_only(self):
+ row_len = len(self._rows)
+
+ for i in range(row_len):
+ yield self.data_row_at(i)
+
+ def _transposed_data(self):
+ return zip(*self._data_only())
+
+def parse_run_script_csv_file_flat(csv_file: TextIO) -> List[int]:
"""Parse a CSV file full of integers into a flat int list."""
csv_reader = csv.reader(csv_file)
arr = []
@@ -201,6 +401,38 @@ def parse_run_script_csv_file(csv_file: TextIO) -> List[int]:
arr.append(int(i))
return arr
+def parse_run_script_csv_file(csv_file: TextIO) -> DataFrame:
+ """Parse a CSV file full of integers into a DataFrame."""
+ csv_reader = csv.reader(csv_file)
+
+ try:
+ header_list = next(csv_reader)
+ except StopIteration:
+ header_list = []
+
+ if not header_list:
+ return None
+
+ headers = [i for i in header_list]
+
+ d = {}
+ for row in csv_reader:
+ header_idx = 0
+
+ for i in row:
+ v = i
+ if i:
+ v = int(i)
+
+ header_key = headers[header_idx]
+ l = d.get(header_key, [])
+ l.append(v)
+ d[header_key] = l
+
+ header_idx = header_idx + 1
+
+ return DataFrame(d)
+
def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)\
-> Tuple[str, TextIO]:
"""
@@ -217,25 +449,95 @@ def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)
cmd = cmd + ["--output", tmp_output_file.name]
return cmd, tmp_output_file
-def execute_arbitrary_command(cmd: List[str], simulate: bool, timeout: int) -> Tuple[bool, str]:
+async def _run_command(*args : List[str], timeout: Optional[int] = None) -> Tuple[int, bytes]:
+ # start child process
+ # NOTE: universal_newlines parameter is not supported
+ process = await asyncio.create_subprocess_exec(*args,
+ stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)
+
+ script_output = b""
+
+ _debug_print("[PID]", process.pid)
+
+#hack
+# stdout, stderr = await process.communicate()
+# return (process.returncode, stdout)
+
+ timeout_remaining = timeout
+ time_started = time.time()
+
+ # read line (sequence of bytes ending with b'\n') asynchronously
+ while True:
+ try:
+ line = await asyncio.wait_for(process.stdout.readline(), timeout_remaining)
+ _debug_print("[STDOUT]", line)
+ script_output += line
+
+ if timeout_remaining:
+ time_elapsed = time.time() - time_started
+ timeout_remaining = timeout - time_elapsed
+ except asyncio.TimeoutError:
+ _debug_print("[TIMEDOUT] Process ", process.pid)
+
+# if process.returncode is not None:
+# #_debug_print("[WTF] can-write-eof?", process.stdout.can_write_eof())
+#
+# _debug_print("[TIMEDOUT] Process already terminated")
+# (remaining_stdout, remaining_stderr) = await process.communicate()
+# script_output += remaining_stdout
+#
+# code = await process.wait()
+# return (code, script_output)
+
+ _debug_print("[TIMEDOUT] Sending SIGTERM.")
+ process.terminate()
+
+    # 5 second timeout for process to handle SIGTERM nicely.
+ try:
+ (remaining_stdout, remaining_stderr) = await asyncio.wait_for(process.communicate(), 5)
+ script_output += remaining_stdout
+ except asyncio.TimeoutError:
+ _debug_print("[TIMEDOUT] Sending SIGKILL.")
+ process.kill()
+
+      # 5 second timeout to finish with SIGKILL.
+ try:
+ (remaining_stdout, remaining_stderr) = await asyncio.wait_for(process.communicate(), 5)
+ script_output += remaining_stdout
+ except asyncio.TimeoutError:
+ # give up, this will leave a zombie process.
+ _debug_print("[TIMEDOUT] SIGKILL failed for process ", process.pid)
+ time.sleep(100)
+ #await process.wait()
+
+ return (-1, script_output)
+ else:
+ if not line: # EOF
+ break
+
+ #if process.returncode is not None:
+ # _debug_print("[WTF] can-write-eof?", process.stdout.can_write_eof())
+ # process.stdout.write_eof()
+
+ #if process.stdout.at_eof():
+ # break
+
+ code = await process.wait() # wait for child process to exit
+ return (code, script_output)
+
+def execute_arbitrary_command(cmd: List[str], simulate: bool, timeout: Optional[int]) -> Tuple[bool, str]:
if simulate:
print(" ".join(cmd))
return (True, "")
else:
_debug_print("[EXECUTE]", cmd)
- proc = subprocess.Popen(cmd,
- stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE,
- universal_newlines=True)
- try:
- script_output = proc.communicate(timeout=timeout)[0]
- except subprocess.TimeoutExpired:
- print("[TIMEDOUT]")
- proc.kill()
- script_output = proc.communicate()[0]
-
- _debug_print("[STDOUT]", script_output)
- return_code = proc.wait()
+
+ # block until either command finishes or the timeout occurs.
+ loop = asyncio.get_event_loop()
+ (return_code, script_output) = loop.run_until_complete(_run_command(*cmd, timeout=timeout))
+
+ script_output = script_output.decode() # convert bytes to str
+
passed = (return_code == 0)
_debug_print("[$?]", return_code)
if not passed:
@@ -267,27 +569,49 @@ def execute_run_combos(grouped_run_combos: Iterable[Tuple[CollectorPackageInfo,
cmd, tmp_output_file = make_script_command_with_temp_output(_RUN_SCRIPT, args, count=loop_count, input=collector_tmp_output_file.name)
with tmp_output_file:
(passed, script_output) = execute_arbitrary_command(cmd, simulate, timeout)
- parsed_output = simulate and [1,2,3] or parse_run_script_csv_file(tmp_output_file)
+ parsed_output = simulate and DataFrame({'fake_ms':[1,2,3]}) or parse_run_script_csv_file(tmp_output_file)
yield (passed, script_output, parsed_output)
-def gather_results(commands: Iterable[Tuple[bool, str, List[int]]], key_list: List[str], value_list: List[Tuple[str, ...]]):
+def gather_results(commands: Iterable[Tuple[bool, str, DataFrame]], key_list: List[str], value_list: List[Tuple[str, ...]]):
_debug_print("gather_results: key_list = ", key_list)
- yield key_list + ["time(ms)"]
+# yield key_list + ["time(ms)"]
stringify_none = lambda s: s is None and "<none>" or s
for ((passed, script_output, run_result_list), values) in itertools.zip_longest(commands, value_list):
+ _debug_print("run_result_list = ", run_result_list)
+ _debug_print("values = ", values)
if not passed:
continue
- for result in run_result_list:
- yield [stringify_none(i) for i in values] + [result]
- yield ["; avg(%s), min(%s), max(%s), count(%s)" %(sum(run_result_list, 0.0) / len(run_result_list), min(run_result_list), max(run_result_list), len(run_result_list)) ]
+ # RunCommandArgs(package='com.whatever', readahead='warm', compiler_filter=None)
+ # -> {'package':['com.whatever'], 'readahead':['warm'], 'compiler_filter':[None]}
+ values_dict = {k:[v] for k,v in values._asdict().items()}
+
+ values_df = DataFrame(values_dict)
+ # project 'values_df' to be same number of rows as run_result_list.
+ values_df = values_df.repeat(run_result_list.data_row_len)
+
+ # the results are added as right-hand-side columns onto the existing labels for the table.
+ values_df.merge_data_columns(run_result_list)
+
+ yield values_df
def eval_and_save_to_csv(output, annotated_result_values):
+
+ printed_header = False
+
csv_writer = csv.writer(output)
for row in annotated_result_values:
- csv_writer.writerow(row)
+ if not printed_header:
+ headers = row.headers
+ csv_writer.writerow(headers)
+ printed_header = True
+ # TODO: what about when headers change?
+
+ for data_row in row.data_table:
+ csv_writer.writerow(data_row)
+
output.flush() # see the output live.
def main():
diff --git a/startop/scripts/app_startup/app_startup_runner_test.py b/startop/scripts/app_startup/app_startup_runner_test.py
index f96f802a3aef..fd81667fe26f 100755
--- a/startop/scripts/app_startup/app_startup_runner_test.py
+++ b/startop/scripts/app_startup/app_startup_runner_test.py
@@ -192,19 +192,140 @@ def test_make_script_command_with_temp_output():
with tmp_file:
assert cmd_str == ["fake_script", "a", "b", "--count", "2", "--output", tmp_file.name]
-def test_parse_run_script_csv_file():
+def test_parse_run_script_csv_file_flat():
# empty file -> empty list
f = io.StringIO("")
- assert asr.parse_run_script_csv_file(f) == []
+ assert asr.parse_run_script_csv_file_flat(f) == []
# common case
f = io.StringIO("1,2,3")
- assert asr.parse_run_script_csv_file(f) == [1,2,3]
+ assert asr.parse_run_script_csv_file_flat(f) == [1,2,3]
# ignore trailing comma
f = io.StringIO("1,2,3,4,5,")
- assert asr.parse_run_script_csv_file(f) == [1,2,3,4,5]
+ assert asr.parse_run_script_csv_file_flat(f) == [1,2,3,4,5]
+
+def test_data_frame():
+ # trivial empty data frame
+ df = asr.DataFrame()
+ assert df.headers == []
+ assert df.data_table == []
+ assert df.data_table_transposed == []
+
+ # common case, same number of values in each place.
+ df = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
+ assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+ assert df.data_table == [[1, 4], [2, 5], [3, 6]]
+ assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]
+
+ # varying num values.
+ df = asr.DataFrame({'many':[1,2], 'none': []})
+ assert df.headers == ['many', 'none']
+ assert df.data_table == [[1, None], [2, None]]
+ assert df.data_table_transposed == [(1, 2), (None, None)]
+
+ df = asr.DataFrame({'many':[], 'none': [1,2]})
+ assert df.headers == ['many', 'none']
+ assert df.data_table == [[None, 1], [None, 2]]
+ assert df.data_table_transposed == [(None, None), (1, 2)]
+
+ # merge multiple data frames
+ df = asr.DataFrame()
+ df.concat_rows(asr.DataFrame())
+ assert df.headers == []
+ assert df.data_table == []
+ assert df.data_table_transposed == []
+
+ df = asr.DataFrame()
+ df2 = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
+
+ df.concat_rows(df2)
+ assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+ assert df.data_table == [[1, 4], [2, 5], [3, 6]]
+ assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]
+
+ df = asr.DataFrame({'TotalTime_ms':[1,2]})
+ df2 = asr.DataFrame({'Displayed_ms':[4,5]})
+
+ df.concat_rows(df2)
+ assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+ assert df.data_table == [[1, None], [2, None], [None, 4], [None, 5]]
+
+ df = asr.DataFrame({'TotalTime_ms':[1,2]})
+ df2 = asr.DataFrame({'TotalTime_ms': [3, 4], 'Displayed_ms':[5, 6]})
+
+ df.concat_rows(df2)
+ assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+ assert df.data_table == [[1, None], [2, None], [3, 5], [4, 6]]
+
+ # data_row_at
+ df = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
+ assert df.data_row_at(-1) == [3,6]
+ assert df.data_row_at(2) == [3,6]
+ assert df.data_row_at(1) == [2,5]
+
+ # repeat
+ df = asr.DataFrame({'TotalTime_ms':[1], 'Displayed_ms':[4]})
+ df2 = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
+ assert df.repeat(3) == df2
+
+ # repeat
+ df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
+ assert df.data_row_len == 3
+ df = asr.DataFrame({'TotalTime_ms':[1,1]})
+ assert df.data_row_len == 2
+
+ # repeat
+ df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
+ assert df.data_row_len == 3
+ df = asr.DataFrame({'TotalTime_ms':[1,1]})
+ assert df.data_row_len == 2
+
+ # data_row_reduce
+ df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
+ df_sum = asr.DataFrame({'TotalTime_ms':[3], 'Displayed_ms':[12]})
+ assert df.data_row_reduce(sum) == df_sum
+
+ # merge_data_columns
+ df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
+ df2 = asr.DataFrame({'Displayed_ms':[3,4,5,6]})
+
+ df.merge_data_columns(df2)
+ assert df == asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[3,4,5,6]})
+
+ df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
+ df2 = asr.DataFrame({'Displayed_ms':[3,4]})
+
+ df.merge_data_columns(df2)
+ assert df == asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[3,4]})
+
+ df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
+ df2 = asr.DataFrame({'TotalTime_ms':[10,11]})
+
+ df.merge_data_columns(df2)
+ assert df == asr.DataFrame({'TotalTime_ms':[10,11,3]})
+
+ df = asr.DataFrame({'TotalTime_ms':[]})
+ df2 = asr.DataFrame({'TotalTime_ms':[10,11]})
+
+ df.merge_data_columns(df2)
+ assert df == asr.DataFrame({'TotalTime_ms':[10,11]})
+
+
+
+
+
+def test_parse_run_script_csv_file():
+ # empty file -> empty list
+ f = io.StringIO("")
+ assert asr.parse_run_script_csv_file(f) == None
+
+ # common case
+ f = io.StringIO("TotalTime_ms,Displayed_ms\n1,2")
+ df = asr.DataFrame({'TotalTime_ms': [1], 'Displayed_ms': [2]})
+ pf = asr.parse_run_script_csv_file(f)
+ assert pf == df
if __name__ == '__main__':
pytest.main()
diff --git a/startop/scripts/app_startup/launch_application b/startop/scripts/app_startup/launch_application
index 8a68e5016190..6704a5a97aa0 100755
--- a/startop/scripts/app_startup/launch_application
+++ b/startop/scripts/app_startup/launch_application
@@ -17,6 +17,19 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "$DIR/lib/common"
+launch_application_usage() {
+ cat <<EOF
+Usage: $(basename $0) <package> <activity>
+
+ Positional Arguments:
+ <package> package of the app to test
+ <activity> activity to use
+
+ Named Arguments:
+ -h, --help usage information (this)
+EOF
+}
+
launch_application() {
local package="$1"
local activity="$2"
@@ -26,22 +39,14 @@ launch_application() {
# if the $ is not escaped, adb shell will try to evaluate $HomeActivity to a variable.
activity=${activity//\$/\\$}
- local am_output="$(adb shell am start -S -W "$package"/"$activity")"
- verbose_print adb shell am start -S -W "$package"/"$activity"
- if [[ $? -ne 0 ]]; then
- echo "am start failed" >&2
+ adb shell am start -S -W "$package"/"$activity"
- return 1
- fi
-
- # for everything else use the am start "TotalTime" output.
- verbose_print "$am_output"
- local total_time="$(echo "$am_output" | grep 'TotalTime:' | sed 's/TotalTime: //g')"
- verbose_print "total time: $total_time"
-
- # TODO: Extract alternative metrics such as the #reportFullyDrawn.
-
- echo "$total_time"
+ # pipe this into 'parse_metrics' to parse the output.
}
+if [[ $# -lt 2 ]]; then
+ launch_application_usage
+ exit 1
+fi
+
launch_application "$@"
diff --git a/startop/scripts/app_startup/lib/common b/startop/scripts/app_startup/lib/common
index 043d8550b64b..bedaa1e10288 100755
--- a/startop/scripts/app_startup/lib/common
+++ b/startop/scripts/app_startup/lib/common
@@ -1,4 +1,17 @@
#!/bin/bash
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
if [[ -z $ANDROID_BUILD_TOP ]]; then
echo "Please run source build/envsetup.sh first" >&2
@@ -49,5 +62,137 @@ get_activity_name() {
local activity_line="$(adb shell cmd package query-activities --brief -a android.intent.action.MAIN -c android.intent.category.LAUNCHER | grep "$package/")"
IFS="/" read -a array <<< "$activity_line"
local activity_name="${array[1]}"
+
+ # Activities starting with '.' are shorthand for having their package name prefixed.
+ if [[ $activity_name == .* ]]; then
+ activity_name="${package}${activity_name}"
+ fi
echo "$activity_name"
}
+
+# Use with logcat_from_timestamp to skip all past log-lines.
+logcat_save_timestamp() {
+ adb shell 'date -u +"%Y-%m-%d %H:%M:%S.%N"'
+}
+
+# Roll forward logcat to only show events
+# since the specified timestamp.
+#
+# i.e. don't look at historical logcat,
+# only look at FUTURE logcat.
+#
+# First use 'logcat_save_timestamp'
+# Then do whatever action you want.
+# Then use 'logcat_from_timestamp_bg $timestamp'
+logcat_from_timestamp_bg() {
+ local timestamp="$1"
+ shift # drop timestamp from args.
+ verbose_print adb logcat -T \"$timestamp\" \"$@\"
+ adb logcat -v UTC -T "$timestamp" "$@" &
+ logcat_from_timestamp_pid=$!
+}
+
+# Starting at timestamp $2, wait until we see pattern $3
+# or until a timeout happens in $1 seconds.
+# If successful, also echo the line that matched the pattern.
+#
+# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse.
+logcat_select_pattern() {
+ local timeout="$1"
+ local timestamp="$2"
+ local pattern="$3"
+
+ local logcat_fd
+
+ coproc logcat_fd {
+ kill_children_quietly() {
+ kill "$logcat_pidd"
+ wait "$logcat_pidd" 2>/dev/null
+ }
+
+ trap 'kill_children_quietly' EXIT # kill logcat when this coproc is killed.
+
+ # run logcat in the background so it can be killed.
+ logcat_from_timestamp_bg "$timestamp"
+ logcat_pidd=$logcat_from_timestamp_pid
+ wait "$logcat_pidd"
+ }
+ local logcat_pid="$!"
+ verbose_print "[LOGCAT] Spawn pid $logcat_pid"
+
+ local timeout_ts="$(date -d "now + ${timeout} seconds" '+%s')"
+ local now_ts="0"
+
+ local return_code=1
+
+ verbose_print "logcat_wait_for_pattern begin"
+
+ while read -t "$timeout" -r -u "${logcat_fd[0]}" logcat_output; do
+ if (( $VERBOSE_LOGCAT )); then
+ verbose_print "LOGCAT: $logcat_output"
+ fi
+ if [[ "$logcat_output:" == *"$pattern"* ]]; then
+ verbose_print "LOGCAT: " "$logcat_output"
+ verbose_print "WE DID SEE PATTERN" '<<' "$pattern" '>>.'
+ echo "$logcat_output"
+ return_code=0
+ break
+ fi
+ now_ts="$(date -d "now" '+%s')"
+ if (( now_ts >= timeout_ts )); then
+ verbose_print "DID TIMEOUT BEFORE SEEING ANYTHING (timeout=$timeout seconds) " '<<' "$pattern" '>>.'
+ break
+ fi
+ done
+
+ # Don't leave logcat lying around since it will keep going.
+ kill "$logcat_pid"
+ # Suppress annoying 'Terminated...' message.
+ wait "$logcat_pid" 2>/dev/null
+
+ verbose_print "[LOGCAT] $logcat_pid should be killed"
+
+ return $return_code
+}
+
+# Starting at timestamp $2, wait until we see pattern $3
+# or until a timeout happens in $1 seconds.
+#
+# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse.
+logcat_wait_for_pattern() {
+ logcat_select_pattern "$@" > /dev/null
+}
+
+# Starting at timestamp $2, wait until we see pattern $3
+# or until a timeout happens in $1 seconds.
+# If successful, extract with the regular expression pattern in $4
+# and return the first capture group.
+#
+# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse.
+logcat_extract_pattern() {
+ local timeout="$1"
+ local timestamp="$2"
+ local pattern="$3"
+ local re_pattern="$4"
+
+ local result
+ local exit_code
+
+ result="$(logcat_select_pattern "$@")"
+ exit_code=$?
+
+ if [[ $exit_code -ne 0 ]]; then
+ return $exit_code
+ fi
+
+ echo "$result" | sed 's/'"$re_pattern"'/\1/g'
+}
+
+# Join array
+# FOO=(a b c)
+# join_by , "${FOO[@]}" #a,b,c
+join_by() {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
diff --git a/startop/scripts/app_startup/metrics/com.google.android.GoogleCamera b/startop/scripts/app_startup/metrics/com.google.android.GoogleCamera
new file mode 100755
index 000000000000..d95fa32dce3f
--- /dev/null
+++ b/startop/scripts/app_startup/metrics/com.google.android.GoogleCamera
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# 05-06 14:51:17.688 29691 29897 I CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED: 385ms
+pattern="CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED:"
+re_pattern='.*ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED: \([[:digit:]]\+\)ms'
+parse_metric_from_logcat "ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED_ms" "$pattern" "$re_pattern"
+
+# 05-06 14:51:17.724 29691 29691 I CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED: 421ms
+pattern="CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED:"
+re_pattern='.*ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED: \([[:digit:]]\+\)ms'
+parse_metric_from_logcat "ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED_ms" "$pattern" "$re_pattern"
diff --git a/startop/scripts/app_startup/parse_metrics b/startop/scripts/app_startup/parse_metrics
new file mode 100755
index 000000000000..c6bf08ee9dfd
--- /dev/null
+++ b/startop/scripts/app_startup/parse_metrics
@@ -0,0 +1,181 @@
+#!/bin/bash
+#
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage() {
+ cat <<EOF
+Usage: launch_application package activity | parse_metrics --package <name> --timestamp <timestamp> [OPTIONS]...
+
+ Reads from stdin the result of 'am start' metrics. May also parse logcat
+ for additional metrics.
+
+ Output form:
+
+ MetricName_unit=numeric_value
+ MetricName2_unit=numeric_value2
+
+ This may block until all desired metrics are parsed from logcat.
+ To get a list of metrics without doing real parsing, use --simulate.
+
+ To add package-specific metrics, add a script called 'metrics/\$full_package_name'
+ that exposes additional metrics in same way as above.
+
+ (required)
+ -p, --package <name> package of the app that is being used
+ -ts, --timestamp <name> logcat timestamp [only looks at logcat entries after this timestamp].
+
+ (optional)
+ -s, --simulate prints dummy values instead of real metrics
+ -a, --activity <name> activity to use (default: inferred)
+ -h, --help usage information (this)
+ -v, --verbose enable extra verbose printing
+ -t, --timeout <sec> how many seconds to timeout when trying to wait for logcat to change
+EOF
+}
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/lib/common"
+
+package=""
+activity=""
+timeout=10
+simulate="n"
+parse_arguments() {
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ -p|--package)
+ package="$2"
+ shift
+ ;;
+ -a|--activity)
+ activity="$2"
+ shift
+ ;;
+ -v|--verbose)
+ export verbose="y"
+ ;;
+ -t|--timeout)
+ timeout="$2"
+ shift
+ ;;
+ -ts|--timestamp)
+ timestamp="$2"
+ shift
+ ;;
+ -s|--simulate)
+ simulate="y"
+ ;;
+ *)
+ echo "Invalid argument: $1" >&2
+ exit 1
+ esac
+ shift
+ done
+}
+
+# Main entry point
+if [[ $# -eq 0 ]]; then
+ usage
+ exit 1
+else
+ parse_arguments "$@"
+
+  # if we do not have a package, exit early with an error
+ [[ "$package" == "" ]] && echo "--package not specified" 1>&2 && exit 64
+
+ # ignore timestamp for --simulate. it's optional.
+ if [[ $simulate == y ]]; then
+ timestamp=0
+ fi
+
+ # if we do not have timestamp, exit early with an error
+ [[ "$timestamp" == "" ]] && echo "--timestamp not specified" 1>&2 && exit 64
+
+ if [[ "$activity" == "" ]] && [[ "$simulate" != "y" ]]; then
+ activity="$(get_activity_name "$package")"
+ if [[ "$activity" == "" ]]; then
+ echo "Activity name could not be found, invalid package name?" 1>&2
+ exit 64
+ else
+ verbose_print "Activity name inferred: " "$activity"
+ fi
+ fi
+fi
+
+parse_metric_from_logcat() {
+ local metric_name="$1"
+ local pattern="$2"
+ local re_pattern="$3"
+ local retcode
+
+ # parse logcat for 'Displayed...' and that other one...
+
+ # 05-06 14:34:08.854 29460 29481 I ActivityTaskManager: Displayed com.google.android.dialer/.extensions.GoogleDialtactsActivity: +361ms
+ verbose_print "parse_metric_from_logcat: $re_pattern"
+
+
+ echo -ne "$metric_name="
+
+ if [[ $simulate == y ]]; then
+ echo "-1"
+ return 0
+ fi
+
+ logcat_extract_pattern "$timeout" "$timestamp" "$pattern" "$re_pattern"
+ retcode=$?
+
+ if [[ $retcode -ne 0 ]]; then
+ # Timed out before finding the pattern. Could also mean the pattern is wrong.
+ echo "-$?"
+ fi
+
+ return $retcode
+}
+
+
+total_time="-1"
+if [[ $simulate != y ]]; then
+ verbose_print 'logcat timestamp NOW: ' $(logcat_save_timestamp)
+
+ # parse stdin for 'am start' result
+ while read -t "$timeout" -r input_line; do
+ verbose_print 'stdin:' "$input_line"
+ if [[ $input_line == *TotalTime:* ]]; then
+ total_time="$(echo "$input_line" | sed 's/TotalTime: \([[:digit:]]\+\)/\1/g')"
+ # but keep reading the rest from stdin until <EOF>
+ fi
+ done
+fi
+
+echo "TotalTime_ms=$total_time"
+
+# parse logcat for 'Displayed...' and that other one...
+
+# 05-06 14:34:08.854 29460 29481 I ActivityTaskManager: Displayed com.google.android.dialer/.extensions.GoogleDialtactsActivity: +361ms
+pattern="ActivityTaskManager: Displayed ${package}"
+re_pattern='.*Displayed[[:blank:]]\+'"${package}"'[/][^[:blank:]]\+[[:blank:]]+\([[:digit:]]\+\).*'
+
+parse_metric_from_logcat "Displayed_ms" "$pattern" "$re_pattern"
+
+# also call into package-specific scripts if there are additional metrics
+if [[ -x "$DIR/metrics/$package" ]]; then
+ source "$DIR/metrics/$package" "$timestamp"
+else
+ verbose_print parse_metrics: no per-package metrics script found at "$DIR/metrics/$package"
+fi
diff --git a/startop/scripts/app_startup/run_app_with_prefetch b/startop/scripts/app_startup/run_app_with_prefetch
index 56bffa85eb90..643df1ba0825 100755
--- a/startop/scripts/app_startup/run_app_with_prefetch
+++ b/startop/scripts/app_startup/run_app_with_prefetch
@@ -24,7 +24,7 @@ Usage: run_app_with_prefetch --package <name> [OPTIONS]...
-v, --verbose enable extra verbose printing
-i, --input <file> trace file protobuf (default 'TraceFile.pb')
-r, --readahead <mode> cold, warm, fadvise, mlock (default 'warm')
- -w, --when <when> aot or jit (default 'aot')
+ -w, --when <when> aot or jit (default 'jit')
-c, --count <count> how many times to run (default 1)
-s, --sleep <sec> how long to sleep after readahead
-t, --timeout <sec> how many seconds to timeout in between each app run (default 10)
@@ -33,7 +33,7 @@ EOF
}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-source "$DIR/lib/common"
+source "$DIR/../iorap/common"
needs_trace_file="n"
input_file=""
@@ -43,7 +43,7 @@ count=2
sleep_time=2
timeout=10
output="" # stdout by default
-when="aot"
+when="jit"
parse_arguments() {
while [[ $# -gt 0 ]]; do
case "$1" in
@@ -176,16 +176,19 @@ if [[ $? -ne 0 ]]; then
fi
verbose_print "Package was in path '$package_path'"
-keep_application_trace_file=n
application_trace_file_path="$package_path/TraceFile.pb"
trace_file_directory="$package_path"
if [[ $needs_trace_file == y ]]; then
# system server always passes down the package path in a hardcoded spot.
if [[ $when == "jit" ]]; then
- verbose_print adb push "$input_file" "$application_trace_file_path"
- adb push "$input_file" "$application_trace_file_path"
+ if ! iorapd_compiler_install_trace_file "$package" "$activity" "$input_file"; then
+ echo "Error: Failed to install compiled TraceFile.pb for '$package/$activity'" >&2
+ exit 1
+ fi
keep_application_trace_file="y"
else
+ echo "TODO: --readahead=aot is non-functional and needs to be fixed." >&2
+ exit 1
# otherwise use a temporary directory to get normal non-jit behavior.
trace_file_directory="/data/local/tmp/prefetch/$package"
adb shell mkdir -p "$trace_file_directory"
@@ -198,7 +201,7 @@ fi
# otherwise system server activity hints will kick in
# and the new just-in-time app pre-warmup will happen.
if [[ $keep_application_trace_file == "n" ]]; then
- adb shell "[[ -f '$application_trace_file_path' ]] && rm '$application_trace_file_path'"
+ iorapd_compiler_purge_trace_file "$package" "$activity"
fi
# Perform AOT readahead/pinning/etc when an application is about to be launched.
@@ -242,12 +245,20 @@ perform_aot() {
fi
}
-perform_aot_cleanup() {
+# Perform cleanup at the end of each loop iteration.
+perform_post_launch_cleanup() {
local the_when="$1" # user: aot, jit
local the_mode="$2" # warm, cold, fadvise, mlock, etc.
+ local logcat_timestamp="$3" # timestamp from before am start.
if [[ $the_when != "aot" ]]; then
- # TODO: just in time implementation.. should probably use system server.
+ if [[ $the_mode != 'warm' && $the_mode != 'cold' ]]; then
+ # Validate that readahead completes.
+ # If this fails for some reason, then this will also discard the timing of the run.
+ iorapd_readahead_wait_until_finished "$package" "$activity" "$logcat_timestamp" "$timeout"
+ return $?
+ fi
+ # Don't need to do anything for warm or cold.
return 0
fi
@@ -306,6 +317,74 @@ configure_compiler_filter() {
# No-op if this option was not passed in.
configure_compiler_filter "$compiler_filter" "$package" "$activity" || exit 1
# convert 'a=b\nc=d\ne=f\n...' into 'b,d,f,...'
#
# $1: a single string containing newline-separated 'name=value' pairs.
# Prints the comma-joined values to stdout.
#
# BUGFIX: the original appended each value back into the SAME array that
# held the raw input lines and then joined that array, so the output
# contained every 'name=value' line followed by every value. The values
# now accumulate in their own array.
parse_metrics_output_string() {
  # single string with newlines in it.
  local input="$1"

  local metric_name
  local metric_value
  local rest
  local line

  # (n1=v1 n2=v2 n3=v3 ...)
  local input_lines=()
  readarray -t input_lines <<< "$input"

  # (value1 value2 value3 ...)
  local metric_values=()

  for line in "${input_lines[@]}"
  do
    verbose_print "parse_metrics_output: element '$line'"
    # name=value

    IFS='=' read -r metric_name metric_value rest <<< "$line"

    verbose_print "parse_metrics_output: metric_value '$metric_value'"

    metric_values+=(${metric_value})
  done

  # "value1,value2,value3,..."
  join_by ',' "${metric_values[@]}"
}
+
# Read newline-separated 'name=value' pairs from stdin and print the
# comma-joined values: 'a=b\nc=d\ne=f' -> 'b,d,f'.
parse_metrics_output() {
  local name
  local value
  local extra

  local values=()

  while IFS='=' read -r name value extra; do
    verbose_print "metric: $name, value: $value; rest: $extra"
    values+=($value)
  done

  join_by ',' "${values[@]}"
}
+
# Read newline-separated 'name=value' pairs from stdin and print the
# comma-joined NAMES (the CSV header row): 'a=b\nc=d\ne=f' -> 'a,c,e'.
parse_metrics_header() {
  local name
  local value
  local extra

  local names=()

  while IFS='=' read -r name value extra; do
    verbose_print "metric: $name, value: $value; rest: $extra"
    names+=($name)
  done

  join_by ',' "${names[@]}"
}
+
+metrics_header="$("$DIR/parse_metrics" --package "$package" --activity "$activity" --simulate | parse_metrics_header)"
+
# TODO: This loop logic could probably be moved into app_startup_runner.py
for ((i=0;i<count;++i)) do
verbose_print "=========================================="
@@ -321,8 +400,10 @@ for ((i=0;i<count;++i)) do
verbose_print "Running with timeout $timeout"
+ pre_launch_timestamp="$(logcat_save_timestamp)"
+
# TODO: multiple metrics output.
- total_time="$(timeout $timeout $DIR/launch_application "$package" "$activity")"
+ total_time="$(timeout $timeout "$DIR/launch_application" "$package" "$activity" | "$DIR/parse_metrics" --package "$package" --activity "$activity" --timestamp "$pre_launch_timestamp" | parse_metrics_output)"
if [[ $? -ne 0 ]]; then
echo "WARNING: Skip bad result, try iteration again." >&2
@@ -330,33 +411,38 @@ for ((i=0;i<count;++i)) do
continue
fi
- perform_aot_cleanup "$when" "$mode"
+ perform_post_launch_cleanup "$when" "$mode" "$pre_launch_timestamp"
+
+ if [[ $? -ne 0 ]]; then
+ echo "WARNING: Skip bad cleanup, try iteration again." >&2
+ ((i=i-1))
+ continue
+ fi
echo "Iteration $i. Total time was: $total_time"
- timings_array+=($total_time)
+ timings_array+=("$total_time")
done
# drop the first result which is usually garbage.
timings_array=("${timings_array[@]:1}")
+# Print the CSV header first.
+echo_to_output_file "$metrics_header"
# Print out interactive/debugging timings and averages.
# Other scripts should use the --output flag and parse the CSV.
for tim in "${timings_array[@]}"; do
- echo_to_output_file -ne "$tim,"
+ echo_to_output_file "$tim"
done
-echo_to_output_file ""
-average_string=$(echo "${timings_array[@]}" | awk '{s+=$0}END{print "Average:",s/NR}' RS=" ")
-echo -ne ${average_string}.
if [[ x$output != x ]]; then
echo " Saved results to '$output'"
fi
-# Temporary hack around multiple activities being launched with different package paths (for same app):
-# Clean up all left-over TraceFile.pb
-adb shell 'for i in $(find /data/app -name TraceFile.pb); do rm \$i; done'
+if [[ $needs_trace_file == y ]] ; then
+ iorapd_compiler_purge_trace_file "$package" "$activity"
+fi
# Kill the process to ensure AM isn't keeping it around.
remote_pkill "$package"
diff --git a/startop/scripts/iorap/collector b/startop/scripts/iorap/collector
new file mode 100755
index 000000000000..d96125f76a37
--- /dev/null
+++ b/startop/scripts/iorap/collector
@@ -0,0 +1,402 @@
+#!/bin/bash
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+APP_STARTUP_DIR="$DIR/../app_startup/"
+source "$DIR/common"
+
+# Print the command-line help text to stdout.
+usage() {
+ cat <<EOF
+Usage: collector [OPTIONS]...
+
+Runs an application, causes an iorap trace to be collected for it, and then invokes the iorap
+compiler to generate a TraceFile.pb.
+
+ -p, --package package of the app to test
+ -a, --activity activity of the app to test
+ -h, --help usage information (this)
+ -v, --verbose enable extra verbose printing
+ -i, --inodes path to inodes file (system/extras/pagecache/pagecache.py -d inodes)
+ -b, --trace_buffer_size how big to make trace buffer size (default 32768)
+ -w, --wait_time how long to run systrace for (default 10) in seconds
+ -c, --compiler-filter override the compilation filter if set (default none)
+ -o, --output output trace file protobuf (default 'TraceFile.pb')
+EOF
+}
+
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+trace_buffer_size=32768
+wait_time=10
+comp_filter=""
+output_dest="TraceFile.pb"
+
+# Parse command-line flags into the global option variables
+# (package, activity, inodes, trace_buffer_size, wait_time, comp_filter,
+# output_dest, verbose).
+# NOTE: unrecognized arguments are silently skipped by the 'case'.
+parse_arguments() {
+  while [[ $# -gt 0 ]]; do
+    case "$1" in
+      -a|--activity)
+        activity="$2"
+        shift
+        ;;
+      -h|--help)
+        usage
+        exit 0
+        ;;
+      -p|--package)
+        package="$2"
+        shift
+        ;;
+      -i|--inodes)
+        inodes="$2"
+        shift
+        ;;
+      -b|--trace_buffer_size)
+        trace_buffer_size="$2"
+        shift
+        ;;
+      -w|--wait_time)
+        wait_time="$2"
+        shift
+        ;;
+      -c|--compiler-filter)
+        comp_filter="$2"
+        shift
+        ;;
+      -o|--output)
+        output_dest="$2"
+        shift
+        ;;
+      -v|--verbose)
+        verbose="y"
+        ;;
+    esac
+    shift
+  done
+}
+
+# Print the PID(s) of device processes whose 'ps' line contains $1.
+# NOTE(review): this is a substring match on the whole ps line, so it can
+# over-match (e.g. 'com.foo' also matches 'com.foo.bar') — confirm callers
+# are OK with that.
+remote_pidof() {
+  local procname="$1"
+  adb shell ps | grep "$procname" | awk '{print $2;}'
+}
+
+# Send a signal (extra args, e.g. -SIGUSR1) to every device process
+# matching $1, via 'adb shell kill'.
+remote_pkill() {
+  local procname="$1"
+  shift
+
+  local the_pids=$(remote_pidof "$procname")
+  local pid
+
+  for pid in $the_pids; do
+    verbose_print adb shell kill "$@" "$pid"
+    adb shell kill "$@" "$pid"
+  done
+}
+
+# Forcefully recompile a package with the given compiler filter.
+#
+# $1: compiler filter (e.g. 'speed-profile', 'quicken').
+# $2: package name.
+force_package_compilation() {
+  local arg_comp_filter="$1"
+  local arg_package="$2"
+
+  if [[ $arg_comp_filter == speed-profile ]]; then
+    # Force the running app to dump its profiles to disk.
+    remote_pkill "$arg_package" -SIGUSR1
+    sleep 1 # give some time for above to complete.
+  fi
+
+  adb shell cmd package compile -m "$arg_comp_filter" -f "$arg_package"
+}
+
# Extract the text between the first occurrence of <left> ($1) and the
# last occurrence of <right> ($2) inside <line> ($3).
# Prints the extracted text and returns 0 on a match; returns 1 and
# prints nothing otherwise.
# NOTE: 'found' is deliberately left non-local (callers may read it).
parse_package_dumpsys_line() {
  local left="$1"
  local right="$2"
  local text="$3"

  # Fail fast when the markers are absent (in order).
  if [[ $text != *${left}*${right}* ]]; then
    return 1
  fi

  # Drop everything up to and including the first <left>,
  # then drop from the last <right> onwards.
  found="${text#*$left}"
  found="${found%$right*}"
  echo "$found"
  return 0
}
+
+# Scan dumpsys-style output ($3...) for a value between $1 and $2,
+# folding indented continuation lines into one logical line first.
+# Returns 0 (printing the match) or 1.
+#
+# NOTE(review): several things here look suspect and should be verified:
+#  - "${lines//$'\n'/}" collapses the input into ONE string, so the
+#    subsequent 'for line in "${lines[@]}"' iterates a single element;
+#  - current_line is reset to "" BEFORE being appended to new_lines and
+#    parsed, so the section-ending branch always parses an empty string.
+# Behavior preserved as-is; only documenting.
+parse_package_dumpsys_section() {
+  local what_left="$1"
+  local what_right="$2"
+  shift
+  local lines="$@"
+
+  lines="${lines//$'\n'/}"
+
+  local new_lines=()
+
+  local current_line=""
+  local newline=n
+  local line
+  for line in "${lines[@]}"; do
+    if [[ $line == *: ]]; then
+      newline=y
+      current_line=""
+      new_lines+=("$current_line")
+
+      parse_package_dumpsys_line "$what_left" "$what_right" "$current_line" && return 0
+    else
+      # strip all spaces from the start
+      line="${line//$' '/}"
+      current_line+="$line"
+      #prepend to current line
+    fi
+  done
+  [[ "$current_line" != "" ]] && new_lines+=("$current_line")
+
+  parse_package_dumpsys_line "$what_left" "$what_right" "$current_line" && return 0
+
+  return 1
+}
+
# Print the current compilation filter (e.g. 'speed-profile') for a package,
# as reported by query_compiler_filter.py.
#
# $1: package name.
# Prints the filter and returns 0 on success; otherwise returns the query
# script's non-zero exit status and prints nothing.
parse_package_compilation() {
  local pkg="$1"
# [com.google.android.apps.maps]

  local compilation_filter
  local is_prebuilt
  local isa
  local etc

  local ret_code
  local query_output

  # BUGFIX: capture the command output first so $? reflects the query
  # script's exit status. In the original
  #   read ... <<< "$(cmd)"; ret_code=$?
  # $? was the status of 'read', not of 'cmd', so failures were missed.
  query_output="$("$APP_STARTUP_DIR"/query_compiler_filter.py --package "$pkg")"
  ret_code=$?
  read compilation_filter is_prebuilt isa etc <<< "$query_output"

  if [[ $ret_code -eq 0 && x$compilation_filter != x ]]; then
    verbose_print "Package compilation info for $pkg was '$compilation_filter'"
    echo "$compilation_filter"
    return 0
  else
    verbose_print "query failed ret code $ret_code filter=$compilation_filter"
  fi

  return $ret_code
}
+
+# Main entry point
+# Validate arguments: --package and --inodes are mandatory; the activity
+# is inferred from the package when not given explicitly.
+if [[ $# -eq 0 ]]; then
+  usage
+  exit 1
+else
+  parse_arguments "$@"
+
+  # if we do not have a package, exit early with an error
+  [[ "$package" == "" ]] && echo "--package not specified" 1>&2 && exit 1
+
+  if [[ -z "$inodes" ]] || ! [[ -f $inodes ]]; then
+    echo "--inodes not specified" 1>&2
+    exit 1
+  fi
+
+  if [[ "$activity" == "" ]]; then
+    # Fall back to the package's launcher activity.
+    activity="$(get_activity_name "$package")"
+    if [[ "$activity" == "" ]]; then
+      echo "Activity name could not be found, invalid package name?" 1>&2
+      exit 1
+    else
+      verbose_print "Activity name inferred: " "$activity"
+    fi
+  fi
+fi
+
+# Device setup: need root + permissive SELinux for iorapd tracing.
+adb root > /dev/null
+
+# Switching to permissive requires a runtime restart (stop/start).
+if [[ "$(adb shell getenforce)" != "Permissive" ]]; then
+  adb shell setenforce 0
+  adb shell stop
+  adb shell start
+  adb wait-for-device
+fi
+
+compilation_was="$(parse_package_compilation "$package")"
+if [[ $? -ne 0 ]]; then
+  echo "Could not determine package compilation filter; was this package installed?" >&2
+  exit 1
+fi
+verbose_print "Package compilation: $compilation_was"
+
+# Cannot downgrade (e.g. from speed-profile to quicken) without forceful recompilation.
+# Forceful recompilation will recompile even if compilation filter was unchanged.
+# Therefore avoid recompiling unless the filter is actually different than what we asked for.
+if [[ "x$comp_filter" != "x" ]] && [[ "$compilation_was" != "$comp_filter" ]]; then
+  echo "Current compilation filter is '$compilation_was'; force recompile to '$comp_filter'" >&2
+  #TODO: this matching seems hopelessly broken, it will always recompile.
+
+  force_package_compilation "$comp_filter" "$package"
+fi
+
+# Drop all caches prior to beginning a systrace, otherwise we won't record anything already in pagecache.
+adb shell "echo 3 > /proc/sys/vm/drop_caches"
+
+trace_tmp_file="$(mktemp -t trace.XXXXXXXXX.html)"
+
+# Always delete the temporary systrace file on exit.
+function finish {
+  [[ -f "$trace_tmp_file" ]] && rm "$trace_tmp_file"
+}
+trap finish EXIT
+
+# Cold-launch $1/$2 and block until iorapd has written out the perfetto
+# trace for it. Returns non-zero (with a FATAL message) on any failure.
+launch_application_and_wait_for_trace() {
+  local package="$1"
+  local activity="$2"
+  local timeout=30 # seconds
+
+  # Ensure application isn't running already.
+  remote_pkill "$package"
+
+  # NOTE(review): killing the app presumably triggers an iorapd trace of
+  # the home screen; there is no way to abort an in-flight trace, so wait
+  # for it to complete before launching. (The wait is 30s even though the
+  # old comment said '5 second trace' — confirm the intended duration.)
+  sleep 30
+
+  local time_now="$(logcat_save_timestamp)"
+  local retcode=0
+
+  verbose_print "Drop caches for non-warm start."
+  # Drop all caches to get cold starts.
+  adb shell "echo 3 > /proc/sys/vm/drop_caches"
+
+  verbose_print "now launching application"
+  # Launch an application
+  "$APP_STARTUP_DIR"/launch_application "$package" "$activity"
+  retcode=$?
+  if [[ $retcode -ne 0 ]]; then
+    echo "FATAL: Application launch failed." >&2
+    return $retcode
+  fi
+
+  # This blocks until 'am start' returns at which point the application is
+  # already to be considered "started" as the first frame has been drawn.
+
+  # TODO: check for cold start w.r.t to activitymanager?
+
+  # Wait for application to start from the point of view of ActivityTaskManager.
+  local pattern="ActivityTaskManager: Displayed $package"
+  logcat_wait_for_pattern "$timeout" "$time_now" "$pattern"
+  retcode=$?
+  if [[ $retcode -ne 0 ]]; then
+    echo "FATAL: Could not find '$pattern' in logcat." >&2
+    return $retcode
+  fi
+
+  # Wait for iorapd to finish writing out the perfetto traces for this app.
+  iorapd_perfetto_wait_for_app_trace "$package" "$activity" "$timeout" "$time_now"
+  retcode=$?
+  if [[ $retcode -ne 0 ]]; then
+    echo "FATAL: Could not save perfetto app trace file." >&2
+    return $retcode
+  fi
+
+  verbose_print "iorapd has finished collecting app trace file for $package/$activity"
+}
+
+# Top-level flow: trace the app launch with perfetto via iorapd, compile
+# the trace on-device, pull the result to $output_dest, then clean up.
+collector_main() {
+  # don't even bother trying to run anything until the screen is unlocked.
+  "$APP_STARTUP_DIR"/unlock_screen
+
+  # Don't mutate state while iorapd is running.
+  iorapd_stop || return $?
+
+  # Remove all existing metadata for a package/activity in iorapd.
+  iorapd_perfetto_purge_app_trace "$package" "$activity" || return $?
+  iorapd_compiler_purge_trace_file "$package" "$activity" || return $?
+
+  iorapd_perfetto_enable || return $?
+  iorapd_start || return $?
+
+  # Wait for perfetto trace to finished writing itself out.
+  launch_application_and_wait_for_trace "$package" "$activity" || return $?
+
+  # Pull the perfetto trace for manual inspection.
+  iorapd_perfetto_pull_trace_file "$package" "$activity" "perfetto_trace.pb"
+
+  # Compile the trace so that the next app run can use prefetching.
+  iorapd_compiler_for_app_trace "$package" "$activity" "$inodes" || return $?
+
+  # Save TraceFile.pb to local file.
+  iorapd_compiler_pull_trace_file "$package" "$activity" "$output_dest" || return $?
+  # Remove the TraceFile.pb from the device.
+  iorapd_compiler_purge_trace_file "$package" "$activity" || return $?
+
+  # TODO: better transactional support for restoring iorapd global properties
+  iorapd_perfetto_disable || return $?
+}
+
+collector_main "$@"
+
+verbose_print "Collector finished. Children: "
+if [[ $verbose == y ]]; then
+  jobs -p
+  ps f -g$$
+fi
+
+# NOTE(review): $? here is the status of the preceding 'fi' (or of ps/jobs
+# when verbose), NOT of collector_main — capture collector_main's status
+# explicitly if the exit code matters.
+exit $?
+
+
+# NOTE(review): everything below is UNREACHABLE (the script exits above).
+# It appears to be the older systrace-based collection path, kept around
+# after the switch to iorapd/perfetto. Consider deleting it.
+verbose_print "About to begin systrace"
+coproc systrace_fd {
+  # Disable stdout buffering since we need to know the output of systrace RIGHT AWAY.
+  stdbuf -oL "$ANDROID_BUILD_TOP"/external/chromium-trace/systrace.py --target=android -b "$trace_buffer_size" -t "$wait_time" am pagecache dalvik -o "$trace_tmp_file"
+}
+
+verbose_print "Systrace began"
+
+systrace_pid="$!"
+
+while read -r -u "${systrace_fd[0]}" systrace_output; do
+  verbose_print "$systrace_output"
+  if [[ "$systrace_output" == *"Starting tracing"* ]]; then
+    verbose_print "WE DID SEE STARTING TRACING."
+    break
+  fi
+done
+# Systrace has begun recording the tracing.
+# Run the application and collect the results.
+
+am_output="$(adb shell am start -S -W "$package"/"$activity")"
+if [[ $? -ne 0 ]]; then
+  echo "am start failed" >&2
+
+  exit 1
+fi
+
+verbose_print "$am_output"
+total_time="$(echo "$am_output" | grep 'TotalTime:' | sed 's/TotalTime: //g')"
+verbose_print "total time: $total_time"
+
+# Now wait for systrace to finish.
+
+wait "$systrace_pid" || { echo "Systrace finished before am start was finished, try a longer --wait_time"; exit 1; }
+verbose_print "Systrace has now finished"
+verbose_print "$(ls -la "$trace_tmp_file")"
+
+
+iorapd_perfetto_disable
+
+# Now that systrace has finished, convert the trace file html file to a protobuf.
+
+"$ANDROID_BUILD_TOP"/system/iorap/src/py/collector/trace_parser.py -i "$inodes" -t "$trace_tmp_file" -o "$output_dest" || exit 1
+
+echo "Trace file collection complete, trace file saved to \"$output_dest\"!" >&2
+
+finish
diff --git a/startop/scripts/iorap/common b/startop/scripts/iorap/common
new file mode 100755
index 000000000000..c10327ef09c1
--- /dev/null
+++ b/startop/scripts/iorap/common
@@ -0,0 +1,228 @@
+#!/bin/bash
+#
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Directory containing this (sourced) script.
DIR_IORAP_COMMON="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# BUGFIX: derive app_startup relative to THIS script's directory instead of
# the caller's $DIR, which may be unset or point somewhere else entirely
# when this file is sourced from another location.
APP_STARTUP_DIR="$DIR_IORAP_COMMON/../app_startup/"
source "$APP_STARTUP_DIR/lib/common"

# On-device directory where iorapd stores its perfetto and compiled traces.
IORAPD_DATA_PATH="/data/misc/iorapd"
+
+# Start the iorapd service and give it a moment to come up.
+iorapd_start() {
+  verbose_print 'iorapd_start'
+  adb shell start iorapd
+  sleep 1
+  # TODO: block until logcat prints successfully connecting
+}
+
+# Stop the iorapd service.
+iorapd_stop() {
+  verbose_print 'iorapd_stop'
+  adb shell stop iorapd
+}
+
+# Restart iorapd (needed for it to re-read its system properties).
+iorapd_reset() {
+  iorapd_stop
+  iorapd_start
+}
+
+# Enable perfetto tracing.
+# Subsequent launches of an application will record a perfetto trace protobuf.
+iorapd_perfetto_enable() {
+  verbose_print 'enable perfetto'
+  adb shell setprop iorapd.perfetto.enable true
+  iorapd_reset # iorapd only reads this flag when initializing
+}
+
+# Disable perfetto tracing.
+# Subsequent launches of applications will no longer record perfetto trace protobufs.
+iorapd_perfetto_disable() {
+  verbose_print 'disable perfetto'
+  adb shell setprop iorapd.perfetto.enable false
+  iorapd_reset # iorapd only reads this flag when initializing
+}
+
# Build the on-device path of an iorapd data file for <package>/<activity>.
# The package and activity are joined with a url-encoded slash ('%2F') to
# match the 'AppComponentName' logic in iorap::compiler (C++).
_iorapd_path_to_data_file() {
  local pkg="$1"
  local act="$2"
  local ext="$3"

  local file_name="${pkg}%2F${act}.${ext}"
  echo "${IORAPD_DATA_PATH}/${file_name}"
}
+
+# Block (up to $3 seconds, scanning logcat after timestamp $4) until iorapd
+# logs that it saved the perfetto trace for $1/$2. Returns non-zero on
+# timeout.
+iorapd_perfetto_wait_for_app_trace() {
+  local package="$1"
+  local activity="$2"
+  local timeout="$3"
+  local timestamp="$4"
+
+  local remote_path="$(_iorapd_path_to_data_file "$package" "$activity" "perfetto_trace.pb")"
+
+  verbose_print "iorapd_perfetto_wait_for_app_trace on file '$remote_path'"
+
+  # see event_manager.cc
+  local pattern="Perfetto TraceBuffer saved to file: $remote_path"
+  logcat_wait_for_pattern "$timeout" "$timestamp" "$pattern"
+}
+
# Delete any perfetto trace that iorapd has recorded for the given
# package/activity. Succeeds (exit 0) even when no trace exists.
iorapd_perfetto_purge_app_trace() {
  local pkg="$1"
  local act="$2"

  local trace_path
  trace_path="$(_iorapd_path_to_data_file "$pkg" "$act" "perfetto_trace.pb")"

  verbose_print 'iorapd-perfetto: purge app trace in ' "$trace_path"
  adb shell "[[ -f '$trace_path' ]] && rm -f '$trace_path' || exit 0"
}
+
# Copy the on-device perfetto trace for <package>/<activity> into the local
# file given by $3, creating the local parent directory when necessary.
# Fails (return 1) when no trace exists on the device.
iorapd_perfetto_pull_trace_file() {
  local pkg="$1"
  local act="$2"
  local local_dest="$3" # local path

  local device_path
  device_path="$(_iorapd_path_to_data_file "$pkg" "$act" "perfetto_trace.pb")"

  # Refuse to pull a trace that was never recorded.
  if ! adb shell "[[ -f '$device_path' ]]"; then
    echo "Error: Remote path '$device_path' invalid" >&2
    return 1
  fi

  if ! mkdir -p "$(dirname "$local_dest")"; then
    echo "Error: Fail to make output directory for '$local_dest'" >&2
    return 1
  fi

  verbose_print adb pull "$device_path" "$local_dest"
  adb pull "$device_path" "$local_dest"
}
+
+# Compile a perfetto trace for a given application.
+# This requires the app has run at least once with perfetto tracing enabled.
+#
+# $1: package, $2: activity, $3: local path to the inodes textcache.
+# Returns the on-device compiler's exit status.
+iorapd_compiler_for_app_trace() {
+  local package="$1"
+  local activity="$2"
+  local inodes="$3" # local path
+
+  # remote path calculations
+  local input_path="$(_iorapd_path_to_data_file "$package" "$activity" "perfetto_trace.pb")"
+  local compiled_path="$(_iorapd_path_to_data_file "$package" "$activity" "compiled_trace.tmp.pb")"
+  local compiled_path_final="$(_iorapd_path_to_data_file "$package" "$activity" "compiled_trace.pb")"
+
+  if ! adb shell "[[ -f '$input_path' ]]"; then
+    echo "Error: Missing perfetto traces; nothing to compile. Expected: '$input_path'" >&2
+    return 1
+  fi
+
+  if ! [[ -f $inodes ]]; then
+    # We could compile using 'diskscan' but it's non-deterministic, so refuse instead.
+    echo "Error: Missing inodes textcache at '$inodes'; refusing to compile." >&2
+    return 1
+  fi
+
+  # inodes file needs to be on the device for iorap.cmd.compiler to access it
+  local remote_inodes=/data/local/tmp/prefetch/inodes.txt
+  adb shell "mkdir -p \"$(dirname "$remote_inodes")\"" || return 1
+  verbose_print adb push "$inodes" "$remote_inodes"
+  adb push "$inodes" "$remote_inodes"
+
+  verbose_print 'iorapd-compiler: compile app trace in ' "$input_path"
+  verbose_print adb shell "iorap.cmd.compiler '$input_path' --inode-textcache '$remote_inodes' --output-proto '$compiled_path'"
+  adb shell "iorap.cmd.compiler '$input_path' --inode-textcache '$remote_inodes' --output-proto '$compiled_path'"
+  # NOTE(review): retcode is not declared 'local' here, so it clobbers any
+  # caller variable of the same name.
+  retcode=$?
+
+  # Don't overwrite the true 'compiled_trace.pb' unless the compiler completed without error.
+  # TODO: The native compiler code should be handling its own transaction-safety.
+  if [[ $retcode -eq 0 ]]; then
+    adb shell "mv '$compiled_path' '$compiled_path_final'"
+  else
+    adb shell "[[ -f '$compiled_path' ]] && rm -f '$compiled_path'"
+  fi
+
+  # Clean up inodes file we just pushed.
+  # NOTE(review): the cleanup below is intentionally (?) disabled — the
+  # pushed inodes file is left on the device.
+# adb shell "[[ -f '$remote_inodes' ]] && rm -f '$remote_inodes'"
+
+  return $retcode
+}
+
# Copy the on-device compiled trace for <package>/<activity> into the local
# file given by $3, creating the local parent directory when necessary.
# Fails (return 1) when no compiled trace exists on the device.
iorapd_compiler_pull_trace_file() {
  local pkg="$1"
  local act="$2"
  local local_dest="$3" # local path

  local device_path
  device_path="$(_iorapd_path_to_data_file "$pkg" "$act" "compiled_trace.pb")"

  # Nothing to pull if the trace was never compiled.
  if ! adb shell "[[ -f '$device_path' ]]"; then
    echo "Error: Remote path '$device_path' invalid" >&2
    return 1
  fi

  if ! mkdir -p "$(dirname "$local_dest")"; then
    echo "Error: Fail to make output directory for '$local_dest'" >&2
    return 1
  fi

  verbose_print adb pull "$device_path" "$local_dest"
  adb pull "$device_path" "$local_dest"
}
+
# Push a locally-compiled trace file ($3) to the on-device location where
# iorapd expects 'compiled_trace.pb' for <package>/<activity>.
iorapd_compiler_install_trace_file() {
  local pkg="$1"
  local act="$2"
  local local_src="$3" # local path

  # remote path calculations
  local device_path
  device_path="$(_iorapd_path_to_data_file "$pkg" "$act" "compiled_trace.pb")"

  if ! [[ -f $local_src ]]; then
    echo "Error: File '$local_src' does not exist." >&2
    return 1
  fi

  adb shell "mkdir -p \"$(dirname "$device_path")\"" || return 1

  verbose_print adb push "$local_src" "$device_path"
  adb push "$local_src" "$device_path"
}
+
# Remove the compiled trace file for <package>/<activity> from the device.
# Succeeds (exit 0) even when no compiled trace exists.
#
# $1: package name.
# $2: activity name.
# (The unused 'input_file' local for a never-passed $3 was removed.)
iorapd_compiler_purge_trace_file() {
  local package="$1"
  local activity="$2"

  local remote_path="$(_iorapd_path_to_data_file "$package" "$activity" "compiled_trace.pb")"

  adb shell "[[ -f '$remote_path' ]] && rm -f '$remote_path' || exit 0"
}
+
+# Blocks until the readahead for the requested package/activity has finished.
+# This assumes that the trace file was already installed, and also that
+# the application launched but not completed yet.
+#
+# $1: package, $2: activity, $3: logcat timestamp to scan from, $4: timeout (s).
+iorapd_readahead_wait_until_finished() {
+  local package="$1"
+  local activity="$2"
+  local timestamp="$3"
+  local timeout="$4"
+
+  # NOTE(review): 'exit' (not 'return') in a sourced helper terminates the
+  # CALLING shell — confirm that is intended for this argument-count check.
+  if [[ $# -lt 4 ]]; then
+    echo "FATAL: Expected 4 arguments (actual $# $@)" >&2
+    exit 1
+  fi
+
+  local remote_path="$(_iorapd_path_to_data_file "$package" "$activity" "compiled_trace.pb")"
+
+  # See 'read_ahead.cc' LOG(INFO).
+  local pattern="ReadAhead completed ($remote_path)"
+  logcat_wait_for_pattern "$timeout" "$timestamp" "$pattern"
+}
diff --git a/startop/scripts/iorap/compile_handcrafted_file.py b/startop/scripts/iorap/compile_handcrafted_file.py
new file mode 100755
index 000000000000..6dbbeaf91571
--- /dev/null
+++ b/startop/scripts/iorap/compile_handcrafted_file.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+import csv
+import itertools
+import os
+import re
+import struct
+import sys
+import tempfile
+import time
+import zipfile
+from typing import Any, Callable, Dict, Generic, Iterable, List, NamedTuple, TextIO, Tuple, TypeVar, Optional, Union
+
+# Include generated protos.
+dir_name = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(dir_name + "/generated")
+
+from TraceFile_pb2 import *
+
+
def parse_options(argv: List[str] = None):
  """Parse command line arguments and return an argparse Namespace object."""
  parser = argparse.ArgumentParser(
      description="Compile a TraceFile.proto from a manual text file.")

  # argparse renders anything starting with -/-- as optional in --help, even
  # with required=True; a named group makes required args read as required.
  required_args = parser.add_argument_group('required named arguments')

  # A separate group keeps the required arguments listed 'above' the
  # optional ones in the --help output.
  optional_args = parser.add_argument_group('optional named arguments')
  add = optional_args.add_argument
  add('-opb', '--output-proto-binary', dest='output_proto_binary',
      action='store', help='Write binary proto output to file.')
  add('-pm', '--pinlist-meta', dest='pinlist_meta', action='store',
      help='Path to pinlist.meta (default=none) binary file.')
  add('-pmp', '--pinlist-meta-parent', dest='pinlist_meta_parent',
      action='store',
      help='Device path that the pinlist.meta applies to (e.g. /data/.../somefile.apk)')
  add('-i', '--input', dest='input', action='store',
      help='Input text file (default stdin).')
  add('-zp', '--zip_path', dest='zip_path', action='append',
      help='Directory containing zip files.')
  add('-d', '--debug', dest='debug', action='store_true',
      help='Add extra debugging output')
  add('-ot', '--output-text', dest='output_text', action='store',
      help='Output text file (default stdout).')

  return parser.parse_args(argv)
+
# TODO: refactor this with a common library file with analyze_metrics.py
def _debug_print(*args, **kwargs):
  """Print the args to sys.stderr if the --debug/-d flag was passed in."""
  if not _debug:
    return
  print(*args, **kwargs, file=sys.stderr)
+
+# Raised by parse_input_file when a line does not match
+# '<filepath> <offset> <length>'.
+class BadInputError(Exception):
+  pass
+
+# One prefetch entry: which file, at what byte offset, how many bytes, plus
+# a free-form remark recording where the entry came from.
+InputRecord = NamedTuple('InputRecord', [('filepath', str), ('offset', int), ('length', int), ('remark', str)])
+
def find_zip_in_paths(original_name, zip_paths):
  """Locate the zip/apk whose basename matches *original_name*.

  Searches every directory in *zip_paths* and returns the full path of the
  first entry with the same basename. Raises ValueError when no directory
  contains the file, or when the match is not a valid zip file.
  """
  # /foo/bar/bax.zip -> bax.zip
  file_basename = os.path.split(original_name)[1]

  # the file must be located in one of the --zip_path arguments
  matched = None
  for candidate_dir in zip_paths:
    hits = [entry for entry in os.listdir(candidate_dir) if entry == file_basename]
    if hits:
      matched = os.path.join(candidate_dir, hits[0])
      break

  if not matched:
    raise ValueError("%s could not be found in any of the --zip_path specified." %(file_basename))

  _debug_print("found zip file ", file_basename, " in ", matched)

  if not zipfile.is_zipfile(matched):
    raise ValueError("%s is not a zip file" %(matched))

  return matched
+
def handle_zip_entry(input_record, zip_paths):
  """Resolve a 'foo!bar' zip-entry filepath into a concrete byte range.

  Records without a '!' separator are returned unchanged. Otherwise 'foo'
  is located via find_zip_in_paths and the record is rewritten to cover the
  compressed bytes of entry 'bar' inside that zip.

  Raises ValueError when the zip file or the named entry cannot be found.
  """
  # Raw string: '!' grouping regex, kept free of string-escape ambiguity.
  res = re.match(r"([^!]+)[!](.*)", input_record.filepath)

  if not res:
    return input_record

  # 'foo!bar'
  in_filepath = res[1]   # -> 'foo'
  in_zip_entry = res[2]  # -> 'bar'

  matched = find_zip_in_paths(in_filepath, zip_paths)

  # BUGFIX: close the zip handle deterministically instead of leaking it
  # (the original never closed it, and also shadowed the 'zip' builtin).
  with zipfile.ZipFile(matched) as zip_file:
    try:
      zip_info = zip_file.getinfo(in_zip_entry)
    except KeyError:
      raise ValueError("%s is not an item in the zip file %s" %(in_zip_entry, matched))

  # TODO: do we also need to add header size to this?
  in_offset = zip_info.header_offset

  # TODO: if a range is specified, use that instead.
  in_length = zip_info.compress_size

  return InputRecord(in_filepath, in_offset, in_length, 'zip entry (%s)' %(in_zip_entry))
+
def parse_input_file(input: Iterable[str], zip_paths: List[str]) -> Iterable[InputRecord]:
  """Yield an InputRecord for each data line of *input*.

  Blank lines and '#'-comment lines are skipped. Every other line must be
  '<filepath> <offset> <length>' (whitespace-separated) or BadInputError is
  raised. 'foo!bar' zip-entry paths are resolved via handle_zip_entry.
  """
  for line in input:
    line = line.strip()

    _debug_print("Line = ", line)
    if not line:
      _debug_print(" skip empty line", line)
      continue
    elif line[0] == "#":
      _debug_print(" skip commented line", line)
      continue

    # BUGFIX: raw string — '\s'/'\d' are regex escapes; without r'' they are
    # invalid string escape sequences (a warning, later an error, in Python 3).
    res = re.match(r"([^\s]+)\s+(\d+)\s+(\d+)", line)
    if not res:
      raise BadInputError("Expected input of form: <str:filepath> <int:offset> <int:length>")

    in_filepath = res[1]
    in_offset = int(res[2])
    in_length = int(res[3])

    yield handle_zip_entry(InputRecord(in_filepath, in_offset, in_length, 'regular file'), zip_paths)
+
+# format:
+# (<big_endian(i32):file_offset> <big_endian(i32):range_length>)+
+# Each pinlist.meta record is a pair of big-endian signed 32-bit integers.
+PIN_META_FORMAT = ">ii"
+# Number of bytes to read per record (8).
+PIN_META_READ_SIZE = struct.calcsize(PIN_META_FORMAT)
+
+def parse_pin_meta(pin_meta_file, pinlist_meta_parent, zip_paths):
+  """Yield InputRecords for every (offset, length) pair in a pinlist.meta.
+
+  pin_meta_file: open binary file of packed big-endian i32 pairs (or falsy
+    to yield nothing).
+  pinlist_meta_parent: device path of the APK the pinlist applies to; the
+    package name is extracted from it to find the local APK's size, which
+    is used to clamp out-of-range entries.
+  zip_paths: directories searched for the local copy of the APK.
+  """
+  if not pin_meta_file:
+    return ()
+
+  global PIN_META_FORMAT
+  global PIN_META_READ_SIZE
+
+  # '/data/app/com.google.android.GoogleCamera-aNQhzSznf4h_bvJ_MRbweQ==/base.apk'
+  # -> 'com.google.android.GoogleCamera'
+  package_name_match = re.match('/.*/(.*)-.*=/base.apk', pinlist_meta_parent)
+
+  if not package_name_match:
+    raise ValueError("%s did not contain the <packagename>.apk" %(pinlist_meta_parent))
+
+  package_name = package_name_match[1]
+  # "com.google.android.GoogleCamera" -> "GoogleCamera.apk"
+  apk_name = package_name.split(".")[-1] + ".apk"
+
+  path_to_zip_on_host = find_zip_in_paths(apk_name, zip_paths)
+  apk_file_size = os.path.getsize(path_to_zip_on_host)
+  _debug_print("APK path '%s' file size '%d'" %(path_to_zip_on_host, apk_file_size))
+
+  while True:
+    data = pin_meta_file.read(PIN_META_READ_SIZE)
+
+    if not data:
+      break
+
+    (pin_offset, pin_length) = struct.unpack(PIN_META_FORMAT, data)  # (offset, length)
+
+    remark = 'regular file (pinlist.meta)'
+
+    remaining_size = apk_file_size - pin_offset
+    if remaining_size < 0:
+      print("WARNING: Clamp entry (%d, %d), offset too large (max file size = %d)" %(pin_offset, pin_length, apk_file_size))
+
+      # NOTE(review): remaining_size is negative here and is added to BOTH
+      # the length and the offset (shifting the range back into the file) —
+      # verify this clamp arithmetic is the intended behavior.
+      pin_length = pin_length + remaining_size
+      pin_offset = pin_offset + remaining_size
+
+      if pin_offset < 0:
+        pin_offset = 0
+
+      remark += '[clamped.offset]'
+
+    pin_last_offset = pin_offset + pin_length
+    remaining_size = apk_file_size - pin_last_offset
+
+    if remaining_size < 0:
+      print("WARNING: Clamp entry (%d, %d), length too large (max file size = %d)" %(pin_offset, pin_length, apk_file_size))
+      # Shrink the length so the range ends at EOF.
+      pin_length = pin_length + remaining_size
+
+      remark += '[clamped.length]'
+
+    yield InputRecord(pinlist_meta_parent, pin_offset, pin_length, remark)
+
def write_text_file_output(input_records: Iterable[InputRecord], output_text_file):
  """Write each record as a '<filepath> <offset> <length> #<remark>' line."""
  for record in input_records:
    line = "%s %d %d #%s\n" % (record.filepath, record.offset, record.length, record.remark)
    output_text_file.write(line)
+
def build_trace_file(input_records: Iterable[InputRecord]) -> TraceFile:
  """Build a TraceFile proto from the parsed input records.

  Each distinct filepath gets exactly one entry in the proto's file index
  (id -> file_name); every record becomes one file-list entry referencing
  its index id with the record's (offset, length).

  Args:
    input_records: Parsed records (regular files, zip entries, pin entries).

  Returns:
    The populated TraceFile proto (not yet serialized).
  """
  trace_file = TraceFile()
  trace_file_index = trace_file.index

  file_id_counter = 0
  file_id_map = {}  # filename -> id

  stats_length_total = 0
  filename_stats = {}  # filename -> total size

  for rec in input_records:
    filename = rec.filepath

    file_id = file_id_map.get(filename)
    # BUG FIX: 0 is a valid id; the original 'if not file_id:' treated the
    # first file's id (0) as missing and re-added its index entry for every
    # subsequent record with the same filename.
    if file_id is None:
      file_id = file_id_counter
      file_id_map[filename] = file_id_counter
      file_id_counter = file_id_counter + 1

      file_index_entry = trace_file_index.entries.add()
      file_index_entry.id = file_id
      file_index_entry.file_name = filename

    # already in the file index, add the file entry.
    file_entry = trace_file.list.entries.add()
    file_entry.index_id = file_id
    file_entry.file_length = rec.length
    stats_length_total += file_entry.file_length
    file_entry.file_offset = rec.offset

    filename_stats[filename] = filename_stats.get(filename, 0) + file_entry.file_length

  return trace_file
+
def main():
  """Entry point: read input records, emit text output and optional proto.

  Reads records from --input (default stdin) and, when given, from a binary
  --pinlist-meta file; writes the textual listing to --output-text (default
  stdout) and optionally a serialized TraceFile proto to
  --output-proto-binary.

  Returns:
    0 on success, 1 when the pinlist options are not used together.
  """
  global _debug

  options= parse_options()
  _debug = options.debug
  _debug_print("parsed options: ", options)

  # Input defaults to stdin when --input is not given.
  if not options.input:
    input_file = sys.stdin
    _debug_print("input = stdin")
  else:
    input_file = open(options.input)
    _debug_print("input = (file)", options.input)

  # Optional serialized-proto output ('wb': binary mode).
  if not options.output_proto_binary:
    output_proto_file = None
  else:
    output_proto_file = open(options.output_proto_binary, 'wb')
  _debug_print("output_proto_binary = ", output_proto_file)

  pinlist_meta_parent = options.pinlist_meta_parent
  if options.pinlist_meta:
    pin_meta_file = open(options.pinlist_meta, 'rb')
  else:
    pin_meta_file = None

  # XOR check: --pinlist-meta and --pinlist-meta-parent must appear together.
  if (pinlist_meta_parent == None) != (pin_meta_file == None):
    print("Options must be used together: --pinlist-meta and --pinlist-meta-path")
    return 1

  # Text output defaults to stdout when --output-text is not given.
  if not options.output_text:
    output_text_file = sys.stdout
    _debug_print("output = stdout")
  else:
    output_text_file = open(options.output_text, 'w')
    _debug_print("output = (file)", options.output_text)

  zip_paths = options.zip_path or []

  # pinlist.meta records first, then the records from the text input.
  input_records = list(parse_pin_meta(pin_meta_file, pinlist_meta_parent, zip_paths))
  input_records = input_records + list(parse_input_file(input_file, zip_paths))

  for p in input_records:
    _debug_print(p)

  write_text_file_output(input_records, output_text_file)
  output_text_file.close()

  out_proto = build_trace_file(input_records)

  if output_proto_file:
    output_proto_file.write(out_proto.SerializeToString())
    output_proto_file.close()

  return 0

if __name__ == '__main__':
  sys.exit(main())
diff --git a/startop/scripts/iorap/dump_compiled_pb b/startop/scripts/iorap/dump_compiled_pb
new file mode 100755
index 000000000000..ad26a7d72c53
--- /dev/null
+++ b/startop/scripts/iorap/dump_compiled_pb
@@ -0,0 +1,38 @@
#!/bin/bash
#
# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Absolute directory containing this script (so relative proto paths work
# regardless of the caller's cwd).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

#
# Dumps an iorap compiler protobuf from iorap.cmd.compiler into text
# with gqui.
#

if [[ "$#" -lt 1 ]]; then
  echo "Usage: $0 <compiler_trace_file.pb> [...args]" >&2
  exit 1
fi

# TraceFile.proto from system/iorap, located relative to this checkout.
path_to_proto="$DIR/../../../../../system/iorap/src/serialize/TraceFile.proto"

filename="$1"
shift
if ! [[ -f $filename ]]; then
  echo "Error: $filename does not exist." >&2
  exit 1
fi

# NOTE(review): gqui must be on PATH; any extra args are forwarded to it.
gqui "rawproto:$filename" proto "$path_to_proto":iorap.serialize.proto.TraceFile "$@"
diff --git a/startop/scripts/iorap/dump_trace_pb b/startop/scripts/iorap/dump_trace_pb
new file mode 100755
index 000000000000..bcec4a524994
--- /dev/null
+++ b/startop/scripts/iorap/dump_trace_pb
@@ -0,0 +1,38 @@
#!/bin/bash
#
# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Absolute directory containing this script (so relative proto paths work
# regardless of the caller's cwd).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

#
# Dumps a perfetto protobuf collected by iorapd (from perfetto) into text
# with gqui.
#

if [[ "$#" -lt 1 ]]; then
  echo "Usage: $0 <perfetto_trace.pb> [...args]" >&2
  exit 1
fi

# perfetto's Trace proto, located relative to this checkout.
path_to_perfetto_proto="$DIR/../../../../../external/perfetto/protos/perfetto/trace/perfetto_trace.proto"

filename="$1"
shift
if ! [[ -f $filename ]]; then
  echo "Error: $filename does not exist." >&2
  exit 1
fi

# NOTE(review): gqui must be on PATH; any extra args are forwarded to it.
gqui "rawproto:$filename" proto "$path_to_perfetto_proto":perfetto.protos.Trace "$@"
diff --git a/startop/scripts/iorap/pull_textcache b/startop/scripts/iorap/pull_textcache
new file mode 100755
index 000000000000..05544263957d
--- /dev/null
+++ b/startop/scripts/iorap/pull_textcache
@@ -0,0 +1,24 @@
#!/bin/bash
#
# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Dumps the device's inode->filename textcache with iorap.inode2filename to
# /data/local/tmp/dumpcache, then pulls it to the given host filename.

if [[ $# -lt 1 ]]; then
  echo "Usage: $0 <output-filename>" >&2
  exit 1
fi

# see compiler/main.cc for list of roots
adb shell iorap.inode2filename --output-format=textcache --output=/data/local/tmp/dumpcache --all --root=/system --root=/apex --root=/vendor --root=/data --root=/product --root=/metadata
adb pull /data/local/tmp/dumpcache "$1"
diff --git a/startop/scripts/trace_analyzer/queries_all.sql b/startop/scripts/trace_analyzer/queries_all.sql
new file mode 100644
index 000000000000..41d1c0804a7f
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_all.sql
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- filter for atrace writes
+CREATE VIEW IF NOT EXISTS tracing_mark_writes AS
+ SELECT *
+ FROM raw_ftrace_entries
+ WHERE function = 'tracing_mark_write';
+
+-- split the tracing_mark_write function args by ||s
+DROP TABLE IF exists tracing_mark_write_split_array;
+
+CREATE TABLE tracing_mark_write_split_array (
+ predictorset_id INT REFERENCES raw_ftrace_entries (id),
+ predictor_name,
+ rest,
+ gen,
+
+ UNIQUE(predictorset_id, gen) -- drops redundant inserts into table
+);
+
+CREATE INDEX "tracing_mark_write_split_array_id" ON tracing_mark_write_split_array (
+ predictorset_id COLLATE BINARY COLLATE BINARY
+);
+
+INSERT INTO tracing_mark_write_split_array
+ WITH
+ split(predictorset_id, predictor_name, rest, gen) AS (
+ -- split by |
+ SELECT id, '', function_args || '|', 0 FROM tracing_mark_writes WHERE id
+ UNION ALL
+ SELECT predictorset_id,
+ substr(rest, 0, instr(rest, '|')),
+ substr(rest, instr(rest, '|')+1),
+ gen + 1
+ FROM split
+ WHERE rest <> ''),
+ split_results AS (
+ SELECT * FROM split WHERE predictor_name <> ''
+ )
+ SELECT * from split_results
+;
+
+
diff --git a/startop/scripts/trace_analyzer/queries_app_launch_spans_with_name.sql b/startop/scripts/trace_analyzer/queries_app_launch_spans_with_name.sql
new file mode 100644
index 000000000000..c28475eec73d
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_app_launch_spans_with_name.sql
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- use the 'launching: $process_name' async slice to figure out launch duration.
+DROP VIEW IF EXISTS launch_durations_named;
+
+CREATE VIEW launch_durations_named AS
+WITH
+ launch_traces_raw AS (
+ SELECT *
+ FROM tracing_mark_write_split AS tmw,
+ raw_ftrace_entries AS rfe
+ WHERE atrace_message LIKE 'launching: %' AND rfe.id = tmw.raw_ftrace_entry_id
+ ),
+ launch_traces_joined AS (
+ SELECT started.timestamp AS started_timestamp,
+ finished.timestamp AS finished_timestamp,
+ started.id AS started_id,
+ finished.id AS finished_id,
+ SUBSTR(started.atrace_message, 12) AS proc_name -- crop out "launching: " from the string.
+ FROM launch_traces_raw AS started,
+ launch_traces_raw AS finished
+ -- async slices ('S' -> 'F') have matching counters given the same PID.
+ WHERE started.atrace_type == 'S'
+ AND finished.atrace_type == 'F'
+ AND started.atrace_count == finished.atrace_count
+ AND started.atrace_pid == finished.atrace_pid
+ )
+SELECT * from launch_traces_joined;
+
+SELECT * FROM launch_durations_named;
diff --git a/startop/scripts/trace_analyzer/queries_block_launch.sql b/startop/scripts/trace_analyzer/queries_block_launch.sql
new file mode 100644
index 000000000000..34e5f03d0c48
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_block_launch.sql
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
DROP VIEW IF EXISTS blocked_iowait_for_app_launches;

-- For each app launch, total time its UI thread spent blocked on iowait
-- within the launch span, plus the launch duration itself.
CREATE VIEW blocked_iowait_for_app_launches AS
WITH
    block_launch_join AS (
        -- keep only blocking intervals fully inside a launch span.
        SELECT *
        FROM blocking_durations AS bd,
             launch_durations_named AS ld
        WHERE bd.block_timestamp >= ld.started_timestamp
          AND bd.unblock_timestamp <= ld.finished_timestamp
    ),
    blocked_ui_threads AS (
        -- restrict to blocks whose unblocked thread is the launched
        -- process's UI thread.
        SELECT *
        FROM start_process_ui_threads AS sp,
             block_launch_join AS blj
        WHERE sp.atm_ui_thread_tid == unblock_pid
          AND sp.process_name = blj.proc_name
    ),
    summed_raw AS (
        -- NOTE(review): '* 1000' presumably converts seconds -> ms; confirm
        -- the timestamp unit in raw_ftrace_entries.
        SELECT SUM(unblock_timestamp-block_timestamp)*1000 AS sum_block_duration_ms,
               *
        FROM blocked_ui_threads
        GROUP BY unblock_pid
    ),
    summed_neat AS (
        SELECT sum_block_duration_ms AS blocked_iowait_duration_ms,
               process_name,
               (finished_timestamp - started_timestamp) * 1000 AS launching_duration_ms,
               started_timestamp * 1000 AS launching_started_timestamp_ms,
               finished_timestamp * 1000 AS launching_finished_timestamp_ms
        -- filter out the rest because its just selecting 1 arbitrary row (due to the SUM aggregate).,
        FROM summed_raw
    )
SELECT * FROM summed_neat;

SELECT * FROM blocked_iowait_for_app_launches;
diff --git a/startop/scripts/trace_analyzer/queries_find_sched_switch_unblocked.sql b/startop/scripts/trace_analyzer/queries_find_sched_switch_unblocked.sql
new file mode 100644
index 000000000000..788d0dae47d3
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_find_sched_switch_unblocked.sql
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
DROP VIEW IF EXISTS sched_switch_iowaits_pre;

-- scan for the closest pair such that:
--    sched_block_reason pid=$PID iowait=1 ...
--    ...
--    sched_switch next_pid=$PID
CREATE VIEW sched_switch_iowaits_pre AS
    SELECT MAX(sbr.id) AS blocked_id,  -- MAX picks the latest blocked-reason before each switch.
           ss.id AS sched_switch_id,
           pid,      -- iow.pid
           iowait,   -- iowait=0 or iowait=1
           caller,
           sbr_f.timestamp AS blocked_timestamp,
           ss_f.timestamp AS sched_switch_timestamp,
           next_comm,  -- name of next_pid
           next_pid    -- same as iow.pid
    FROM sched_blocked_reasons AS sbr,
         raw_ftrace_entries AS sbr_f,
         sched_switches AS ss,
         raw_ftrace_entries AS ss_f
    WHERE sbr_f.id == sbr.id
      AND ss_f.id == ss.id
      AND sbr.pid == ss.next_pid
      AND sbr.iowait = 1
      AND sbr_f.timestamp < ss_f.timestamp -- ensures the 'closest' sched_blocked_reason is selected.
    GROUP BY ss.id
;

DROP VIEW IF EXISTS sched_switch_iowaits;

-- One row per blocked_id: the earliest sched_switch that unblocked it.
CREATE VIEW sched_switch_iowaits AS
    SELECT *, MIN(sched_switch_timestamp) AS ss_timestamp -- drop all of the 'too large' sched_switch entries except the closest one.
    FROM sched_switch_iowaits_pre
    GROUP BY blocked_id;

SELECT * FROM sched_switch_iowaits;

-- use a real table here instead of a view, otherwise SQLiteStudio segfaults for some reason.
DROP TABLE IF EXISTS blocking_durations;

-- Intervals (block_timestamp, unblock_timestamp) during which a thread was
-- blocked on iowait, keyed by the unblocked pid.
CREATE TABLE blocking_durations AS
WITH
    blocking_durations_raw AS (
        SELECT MAX(ss.id) AS block_id,  -- latest sched_switch-away before the unblock.
               ssf.timestamp AS block_timestamp,
               iow.sched_switch_timestamp AS unblock_timestamp,
               ss.prev_comm as block_prev_comm,
               iow.next_comm AS unblock_next_comm,
               ss.prev_state AS block_prev_state,
               iow.sched_switch_id AS unblock_id,
               iow.pid AS unblock_pid,
               iow.caller AS unblock_caller
        FROM sched_switches AS ss,        -- this is the sched_switch that caused a block (in the future when it unblocks, the reason is iowait=1).
             sched_switch_iowaits AS iow, -- this is the sched_switch that removes the block (it is now running again).
             raw_ftrace_entries AS ssf
        WHERE ssf.id = ss.id AND ss.prev_pid == iow.next_pid AND ssf.timestamp < iow.sched_switch_timestamp
        GROUP BY unblock_timestamp
    ),
    blocking_durations_tmp AS (
        -- rename/select the final output columns.
        SELECT block_id,
               unblock_timestamp,
               block_timestamp,
               block_prev_comm as comm,
               block_prev_state as block_state,
               unblock_id,
               unblock_pid,
               unblock_caller
        FROM blocking_durations_raw
    )
    SELECT * FROM blocking_durations_tmp;-- ORDER BY block_id ASC;
    --SELECT SUM(block_duration_ms) AS sum, * FROM blocking_durations GROUP BY unblock_pid ORDER BY sum DESC;

DROP INDEX IF EXISTS "blocking_durations_block_timestamp";

CREATE INDEX "blocking_durations_block_timestamp" ON blocking_durations (
    block_timestamp COLLATE BINARY COLLATE BINARY
);

DROP INDEX IF EXISTS "blocking_durations_unblock_timestamp";

CREATE INDEX "blocking_durations_unblock_timestamp" ON blocking_durations (
    unblock_timestamp COLLATE BINARY COLLATE BINARY
);

SELECT * FROM blocking_durations;
diff --git a/startop/scripts/trace_analyzer/queries_get_comm_and_pids.sql b/startop/scripts/trace_analyzer/queries_get_comm_and_pids.sql
new file mode 100644
index 000000000000..0c166b09784c
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_get_comm_and_pids.sql
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
DROP VIEW IF EXISTS sched_switch_next_comm_pids;

-- Parses next_comm / next_pid out of sched_switch function_args strings.
CREATE VIEW IF NOT EXISTS sched_switch_next_comm_pids AS

-- TODO: switch to using sched_switches table.

WITH
    sched_switchs AS (
        -- 'next_comm=main' entries are excluded (too ambiguous to match).
        SELECT * FROM raw_ftrace_entries WHERE function = 'sched_switch' AND function_args LIKE '% next_pid=%' AND function_args NOT LIKE '% next_comm=main %'
    ),
    comm_and_pids_raws AS (
        SELECT id,
               -- e.g. 'next_comm=foo ' (from 'next_comm=' up to 'next_pid=').
               SUBSTR(function_args, instr(function_args, "next_comm="), instr(function_args, "next_pid=") - instr(function_args, "next_comm=")) AS next_comm_raw,
               -- e.g. 'next_pid=123 ' (from 'next_pid=' up to 'next_prio=').
               SUBSTR(function_args, instr(function_args, "next_pid="), instr(function_args, "next_prio=") - instr(function_args, "next_pid=")) AS next_pid_raw
        FROM sched_switchs
    ),
    comm_and_pids AS (
        SELECT id,
               id AS raw_ftrace_entry_id,
               TRIM(SUBSTR(next_pid_raw, 10)) AS next_pid, -- drop the 9-char "next_pid=" prefix (SQLite SUBSTR is 1-indexed, so start at 10).
               TRIM(SUBSTR(next_comm_raw, 11)) AS next_comm -- drop the 10-char "next_comm=" prefix (start at 11).
        FROM comm_and_pids_raws
    )
SELECT * from comm_and_pids;

SELECT * from sched_switch_next_comm_pids;
diff --git a/startop/scripts/trace_analyzer/queries_get_procs.sql b/startop/scripts/trace_analyzer/queries_get_procs.sql
new file mode 100644
index 000000000000..06871c6e16b6
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_get_procs.sql
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
DROP VIEW IF EXISTS start_procs;

-- One row per system_server 'Start proc: $process_name' atrace message.
CREATE VIEW IF NOT EXISTS start_procs AS
WITH
    start_procs_raw AS (
        SELECT * from tracing_mark_write_split WHERE atrace_message LIKE 'Start proc: %'
    ),
    start_procs_substr AS (
        -- note: 13 = len("Start proc: ") + 1, since SQLite SUBSTR indices start at 1.
        SELECT raw_ftrace_entry_id, atrace_pid, SUBSTR(atrace_message, 13) AS process_name FROM start_procs_raw
    )
SELECT * from start_procs_substr;

SELECT * from start_procs;
diff --git a/startop/scripts/trace_analyzer/queries_get_ui_threads.sql b/startop/scripts/trace_analyzer/queries_get_ui_threads.sql
new file mode 100644
index 000000000000..876e50e9880a
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_get_ui_threads.sql
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- note: These queries do comparisons based on raw_ftrace_entries.id by treating it as if it was equivalent to the temporal timestamp.
+-- in practice, the ID of raw_ftrace_entries is based on its order in the ftrace buffer [and on the same cpu its equivalent].
+-- we can always resort raw_ftrace_entries to ensure id order matches timestamp order. We should rarely need to compare by timestamp directly.
+-- accessing 'floats' is inferior as they are harder to index, and will result in slower queries.
+--
+-- Naming convention note: '_fid' corresponds to 'raw_ftrace_entry.id'.
DROP VIEW IF EXISTS start_process_ui_threads;

-- Map of started process names to their UI thread's TID (as returned by gettid).
CREATE VIEW IF NOT EXISTS start_process_ui_threads AS
WITH
    start_proc_tids AS (
        -- candidate (Start proc, sched_switch) pairs matched by comm name.
        SELECT sp.raw_ftrace_entry_id AS start_proc_fid,
               sp.atrace_pid AS atrace_pid,
               sp.process_name AS process_name,
               --MIN(nc.raw_ftrace_entry_id) as next_comm_fid,
               nc.raw_ftrace_entry_id AS next_comm_fid,
               nc.next_pid as next_pid,
               nc.next_comm as next_comm,
               SUBSTR(sp.process_name, -15) AS cut -- why -15? See TASK_MAX in kernel, the sched_switch name is truncated to 16 bytes.
        FROM start_procs AS sp,
             sched_switch_next_comm_pids AS nc
        WHERE sp.process_name LIKE '%' || nc.next_comm -- kernel truncates the sched_switch::next_comm event, so we must match the prefix of the full name.
        --WHERE SUBSTR(sp.process_name, -16) == nc.next_comm
        --WHERE cut == nc.next_comm
    ),
    start_proc_tids_filtered AS (
        SELECT *
        FROM start_proc_tids
        WHERE next_comm_fid > start_proc_fid -- safeguard that avoids choosing "earlier" sched_switch before process was even started.
        --ORDER BY start_proc_fid, next_comm_fid
    ),
    start_proc_all_threads AS (
        SELECT DISTINCT
               start_proc_fid, -- this is the ftrace entry of the system server 'Start proc: $process_name'. only need this to join for timestamp.
               process_name,   -- this is the '$process_name' from the system server entry.
               -- next up we have all the possible thread IDs as parsed from sched_switch that corresponds most closest to the start proc.
               next_pid AS ui_thread_tpid, -- sched_switch.next_pid. This can be any of the threads in that process, it's not necessarily the main UI thread yet.
               next_comm,
               MIN(next_comm_fid) AS next_comm_fid -- don't pick the 'later' next_comm_fid because it could correspond to another app start.
        FROM start_proc_tids_filtered
        GROUP BY start_proc_fid, ui_thread_tpid
    ),
    activity_thread_mains AS (
        -- 'ActivityThreadMain' is traced on the app's UI thread.
        SELECT * FROM tracing_mark_write_split WHERE atrace_message = 'ActivityThreadMain'
    ),
    start_proc_ui_threads AS (
        SELECT start_proc_fid,
               process_name,
               ui_thread_tpid,
               next_comm,
               next_comm_fid,
               atm.raw_ftrace_entry_id as atm_fid,
               atm.atrace_pid as atm_ui_thread_tid
        FROM start_proc_all_threads AS spt,
             activity_thread_mains AS atm
        WHERE atm.atrace_pid == spt.ui_thread_tpid AND atm.raw_ftrace_entry_id > spt.start_proc_fid -- Ensure we ignore earlier ActivityThreadMains prior to their Start proc.
    ),
    start_proc_ui_threads_filtered AS (
        SELECT start_proc_fid,
               process_name, -- e.g. 'com.android.settings'
               --ui_thread_tpid,
               --next_comm,
               --next_comm_fid,
               MIN(atm_fid) AS atm_fid,
               atm_ui_thread_tid -- equivalent to gettid() for the process's UI thread.
        FROM start_proc_ui_threads
        GROUP BY start_proc_fid, atm_ui_thread_tid -- find the temporally closest ActivityTaskMain to a "Start proc: $process_name"
    )
SELECT * FROM start_proc_ui_threads_filtered;

SELECT * FROM start_process_ui_threads;
diff --git a/startop/scripts/trace_analyzer/queries_mark_write_join.sql b/startop/scripts/trace_analyzer/queries_mark_write_join.sql
new file mode 100644
index 000000000000..100f07403423
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_mark_write_join.sql
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
DROP TABLE IF EXISTS tracing_mark_write_split;

-- Pivots the per-fragment rows of tracing_mark_write_split_array into one
-- row per atrace event with (type, pid, message, count) columns.
CREATE TABLE tracing_mark_write_split (
    raw_ftrace_entry_id INT REFERENCES raw_ftrace_entries (id),
    atrace_type CHAR(1), -- only null for the first 2 sync timers. usually 'B', 'C', E', ...
    atrace_pid INT,      -- only null for first 2 sync timers
    atrace_message,      -- usually null for type='E' etc.
    atrace_count,        -- usually non-null only for 'C'

    UNIQUE(raw_ftrace_entry_id) -- drops redundant inserts into table
);

INSERT INTO tracing_mark_write_split
WITH
    pivoted AS (
        -- self-LEFT-JOIN on generation number: gen 1..4 become the four
        -- columns; missing generations yield NULLs.
        SELECT tx.predictorset_id,
               --ty.predictorset_id,
               --tz.predictorset_id,
               --tzz.predictorset_id,
               tx.predictor_name AS atrace_type,
               CAST(ty.predictor_name AS integer) AS atrace_pid,
               tz.predictor_name AS atrace_message,
               CAST(tzz.predictor_name AS integer) AS atrace_count
        FROM (SELECT * from tracing_mark_write_split_array WHERE gen = 1) AS tx
        LEFT JOIN
             (SELECT * FROM tracing_mark_write_split_array WHERE gen = 2) AS ty
             ON tx.predictorset_id = ty.predictorset_id
        LEFT JOIN
             (SELECT * FROM tracing_mark_write_split_array WHERE gen = 3) AS tz
             ON tx.predictorset_id = tz.predictorset_id
        LEFT JOIN
             (SELECT * FROM tracing_mark_write_split_array WHERE gen = 4) AS tzz
             ON tx.predictorset_id = tzz.predictorset_id
    )
SELECT * from pivoted ORDER BY predictorset_id;-- LIMIT 100;

SELECT * FROM tracing_mark_write_split;
diff --git a/startop/scripts/trace_analyzer/queries_pretty_print_block_launch.sql b/startop/scripts/trace_analyzer/queries_pretty_print_block_launch.sql
new file mode 100644
index 000000000000..bf5e3ccb4ac7
--- /dev/null
+++ b/startop/scripts/trace_analyzer/queries_pretty_print_block_launch.sql
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
-- sqlite3 CLI directives: emit column headers and quote all values.
.headers on
.mode quote

SELECT * FROM blocked_iowait_for_app_launches;

/*
Output as CSV example:

'blocked_iowait_duration_ms','process_name','launching_duration_ms','launching_started_timestamp_ms','launching_finished_timestamp_ms'
125.33199995596078224,'com.android.settings',1022.4840000009862706,17149896.822000000626,17150919.305999998003

*/
diff --git a/startop/scripts/trace_analyzer/run-sql-queries b/startop/scripts/trace_analyzer/run-sql-queries
new file mode 100755
index 000000000000..61a0ad42a339
--- /dev/null
+++ b/startop/scripts/trace_analyzer/run-sql-queries
@@ -0,0 +1,79 @@
#!/bin/bash
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Runs the trace_analyzer SQL files, in dependency order, against an existing
# sqlite3 database (as produced by trace_analyzer.py), then pretty-prints the
# final result.

DIR="$( cd "$(dirname "$0")" ; pwd -P )"

if [[ $# -lt 1 ]]; then
  # FIX: print usage to stderr and actually exit; the original fell through
  # with an empty DB_TARGET after printing usage to stdout.
  echo "Usage: $0 <db-file>" >&2
  exit 1
fi

DB_TARGET=$1

if ! [[ -f $DB_TARGET ]]; then
  echo "ERROR: File '$DB_TARGET' does not exist." >&2
  exit 1
fi

# Run one SQL file (relative to this script's directory) against DB_TARGET.
exec_sql_file() {
  local filename="$1"
  if ! [[ -f $filename ]]; then
    echo "ERROR: Can't exec SQL file, '$filename' does not exist." >&2
    return 1
  fi

  sqlite3 "$DB_TARGET" < "$DIR"/"$filename"
}

# Same as exec_sql_file but discards stdout (intermediate queries).
exec_sql_file_quiet() {
  exec_sql_file "$@" > /dev/null
}

# Some views/tables need other views already created, so order does matter.
# x -> y , means x depends on y.

# View: tracing_mark_writes
# Table: tracing_mark_write_split_array -> tracing_mark_writes
exec_sql_file_quiet "queries_all.sql"

# Table: tracing_mark_write_split -> tracing_mark_write_split_array
exec_sql_file_quiet "queries_mark_write_join.sql"

# View: start_procs -> tracing_mark_write_split
exec_sql_file_quiet "queries_get_procs.sql"

# View: sched_switch_next_comm_pids
exec_sql_file_quiet "queries_get_comm_and_pids.sql"

# View: start_process_ui_threads -> start_procs, sched_switch_next_comm_pids
exec_sql_file_quiet "queries_get_ui_threads.sql"

# View: launch_durations_named -> tracing_mark_write_split
exec_sql_file_quiet "queries_app_launch_spans_with_name.sql"

# View: sched_switch_iowaits_pre
# View: sched_switch_iowaits -> sched_switch_iowaits_pre
# Table: blocking_durations -> sched_switch_iowaits
exec_sql_file_quiet "queries_find_sched_switch_unblocked.sql"

# View: blocked_iowait_for_app_launches -> launch_durations_named, blocking_durations
exec_sql_file_quiet "queries_block_launch.sql"

#####
#####
#####

# Final queries

exec_sql_file "queries_pretty_print_block_launch.sql"
diff --git a/startop/scripts/trace_analyzer/trace_analyzer b/startop/scripts/trace_analyzer/trace_analyzer
new file mode 100755
index 000000000000..8c0396430c40
--- /dev/null
+++ b/startop/scripts/trace_analyzer/trace_analyzer
@@ -0,0 +1,42 @@
+#!/bin/bash
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Directory of this script, used to invoke the sibling tools by absolute path.
DIR="$( cd "$(dirname "$0")" ; pwd -P )"

[[ "$#" -ge 2 ]] || { echo "Usage: $0 <filename.trace> <sqlite-filename.db>" >&2; exit 1; }

trace_file="$1"
db_file="$2"

# The input trace must already exist; the DB file is created on demand.
if ! [[ -f "$trace_file" ]]; then
  echo "Error: Trace '$trace_file' does not exist." >&2
  exit 1
fi

# Step 1: parse the raw trace into a sqlite database (parser stdout discarded).
"$DIR/trace_analyzer.py" "$db_file" "$trace_file" > /dev/null || {
  echo "Fatal: trace_analyzer.py failed, aborting." >&2
  exit 1
}

# Step 2: build derived tables/views and emit the final report.
"$DIR/run-sql-queries" "$db_file" || {
  echo "Fatal: Failed to run sql queries, aborting." >&2
  exit 1
}
diff --git a/startop/scripts/trace_analyzer/trace_analyzer.py b/startop/scripts/trace_analyzer/trace_analyzer.py
new file mode 100755
index 000000000000..45429339bea6
--- /dev/null
+++ b/startop/scripts/trace_analyzer/trace_analyzer.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python3
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import sys
+
+from sqlalchemy import create_engine
+from sqlalchemy import Column, Date, Integer, Float, String, ForeignKey
+from sqlalchemy.ext.declarative import declarative_base
+
+from sqlalchemy.orm import sessionmaker
+
+import sqlalchemy
+
# Echo every generated SQL statement when True (passed to create_engine).
_DEBUG = False
# Maximum number of trace lines to parse; None means parse the whole file.
#_LIMIT = 100000
_LIMIT = None
# How many parsed lines to buffer before bulk-inserting into the database.
_FLUSH_LIMIT = 10000

# Declarative base shared by all ORM-mapped table classes below.
Base = declarative_base()
+
class RawFtraceEntry(Base):
    """One line of an ftrace text dump, split into its common leading fields."""
    __tablename__ = 'raw_ftrace_entries'

    id = Column(Integer, primary_key=True)
    task_name = Column(String, nullable=True)  # <...> -> None.
    task_pid = Column(String, nullable=False)
    tgid = Column(Integer, nullable=True)  # ----- -> None.
    cpu = Column(Integer, nullable=False)
    timestamp = Column(Float, nullable=False)
    function = Column(String, nullable=False)
    function_args = Column(String, nullable=False)

    # FIX: raw string (the old plain literal relied on invalid escape sequences
    # like '\s'), compiled once instead of on every call. Matches e.g.:
    # ' <...>-5521 (-----) [003] ...1 17148.446877: tracing_mark_write: trace_event_clock_sync: parent_ts=17148.447266'
    _LINE_RE = re.compile(
        r'\s*(.*)-(\d+)\s+\(([^\)]+)\)\s+\[(\d+)\]\s+([\w.]{4})\s+(\d+[.]\d+):\s+(\w+):\s+(.*)')

    @staticmethod
    def parse(line):
        """Parse one trace line into a RawFtraceEntry, or None if it doesn't match.

        FIX: delegates to parse_dict instead of duplicating the whole parsing
        logic a second time.
        """
        fields = RawFtraceEntry.parse_dict(line)
        if fields is None:
            return None
        return RawFtraceEntry(**fields)

    @staticmethod
    def parse_dict(line):
        """Parse one trace line into a dict of column values, or None on no match."""
        m = RawFtraceEntry._LINE_RE.match(line)
        if not m:
            return None

        groups = m.groups()
        # groups example:
        # ('<...>', '5521', '-----', '003', '...1', '17148.446877',
        #  'tracing_mark_write', 'trace_event_clock_sync: parent_ts=17148.447266')
        task_name = groups[0]
        if task_name == '<...>':
            # The kernel did not record the task comm for this entry.
            task_name = None

        task_pid = int(groups[1])
        tgid = groups[2]
        if tgid == '-----':
            # tgid unavailable for this entry.
            tgid = None

        cpu = int(groups[3])
        # groups[4] is the irq-flags field; intentionally not stored.
        timestamp = float(groups[5])
        function = groups[6]
        function_args = groups[7]

        return {'task_name': task_name, 'task_pid': task_pid, 'tgid': tgid,
                'cpu': cpu, 'timestamp': timestamp, 'function': function,
                'function_args': function_args}
+
+#class TracingMarkWriteFtraceEntry(RawFtraceEntry):
+# __tablename__ = 'tracing_mark_write_ftrace_entries'
+#
+# id = Column(Integer, ForeignKey('raw_ftrace_entries.id'), primary_key=True)
+# mark_type = Column(String(1), nullable=False)
+# mark_id = Column(Integer, nullable=False)
+# message = Column(String)
+#
+## __mapper_args__ = {
+## 'polymorphic_identity':'tracing_mark_write',
+## }
+#
+# @staticmethod
+# def decode(raw_ftrace_entry):
+# if raw_ftrace_entry.function != 'tracing_mark_write':
+# raise ValueError("raw_ftrace_entry must be function 'tracing_mark_write':" + raw_ftrace_entry)
+#
+# #"B|2446|(Paused)ClearCards|Foo"
+# match = re.match("([^|]*)\|([^|]*)\|(.*)", raw_ftrace_entry.function_args)
+#
+# if not match:
+# return None
+#
+# # ('B', '2446', '(Paused)ClearCards|Foo')
+# groups = match.groups()
+#
+# mark_type = groups[0]
+# mark_id = int(groups[1])
+# message = groups[2]
+#
+# return TracingMarkWriteFtraceEntry(id = raw_ftrace_entry.id,
+# mark_type = mark_type,
+# mark_id = mark_id,
+# message = message)
+
class SchedSwitch(Base):
    """A sched_switch ftrace event: the CPU switches from prev_* to next_* task."""
    __tablename__ = 'sched_switches'

    # Shares its primary key with the raw_ftrace_entries row it was parsed from.
    id = Column(Integer, ForeignKey('raw_ftrace_entries.id'), primary_key=True)

    prev_comm = Column(String, nullable=False)
    prev_pid = Column(Integer, nullable=False)
    prev_prio = Column(Integer, nullable=False)
    prev_state = Column(String, nullable=False)

    next_comm = Column(String, nullable=False)
    next_pid = Column(Integer, nullable=False)
    next_prio = Column(Integer, nullable=False)

    @staticmethod
    def parse_dict(function_args, id = None):
        """Parse a sched_switch payload into a dict of column values.

        Returns None if function_args does not match. 'id', when given, is
        included so the row can share the originating raw entry's key.
        """
        # e.g. 'prev_comm=kworker/u16:5 prev_pid=13971 prev_prio=120 prev_state=S ==> next_comm=swapper/4 next_pid=0 next_prio=120'
        # FIX: raw string; '\d' in a plain literal is an invalid escape sequence.
        m = re.match(r"prev_comm=(.*) prev_pid=(\d+) prev_prio=(\d+) prev_state=(.*) ==> next_comm=(.*) next_pid=(\d+) next_prio=(\d+) ?", function_args)
        if not m:
            return None

        groups = m.groups()
        # ('kworker/u16:5', '13971', '120', 'S', 'swapper/4', '0', '120')
        d = {}
        if id is not None:
            d['id'] = id
        d['prev_comm'] = groups[0]
        d['prev_pid'] = int(groups[1])
        d['prev_prio'] = int(groups[2])
        d['prev_state'] = groups[3]
        d['next_comm'] = groups[4]
        d['next_pid'] = int(groups[5])
        d['next_prio'] = int(groups[6])

        return d
+
class SchedBlockedReason(Base):
    """A sched_blocked_reason ftrace event: why a task was blocked (iowait etc.)."""
    __tablename__ = 'sched_blocked_reasons'

    # Shares its primary key with the raw_ftrace_entries row it was parsed from.
    id = Column(Integer, ForeignKey('raw_ftrace_entries.id'), primary_key=True)

    pid = Column(Integer, nullable=False)
    iowait = Column(Integer, nullable=False)
    caller = Column(String, nullable=False)

    @staticmethod
    def parse_dict(function_args, id = None):
        """Parse a sched_blocked_reason payload into a dict of column values.

        Returns None if function_args does not match. 'id', when given, is
        included so the row can share the originating raw entry's key.
        """
        # e.g. 'pid=2289 iowait=1 caller=wait_on_page_bit_common+0x2a8/0x5f'
        # FIX: raw string; '\d' in a plain literal is an invalid escape sequence.
        m = re.match(r"pid=(\d+) iowait=(\d+) caller=(.*) ?", function_args)
        if not m:
            return None

        groups = m.groups()
        # ('2289', '1', 'wait_on_page_bit_common+0x2a8/0x5f8')
        d = {}
        if id is not None:
            d['id'] = id
        d['pid'] = int(groups[0])
        d['iowait'] = int(groups[1])
        d['caller'] = groups[2]

        return d
+
def init_sqlalchemy(db_filename):
    """Open (or create) the sqlite database and return a (session, engine) pair."""
    engine = create_engine('sqlite:///' + db_filename, echo=_DEBUG)

    # Create any missing tables; existing tables are left untouched.
    Base.metadata.create_all(engine)

    session_factory = sessionmaker(bind=engine)
    return (session_factory(), engine)
+
def insert_pending_entries(engine, kls, lst):
    """Bulk-insert the buffered row dicts in lst into kls's table, then clear lst."""
    # Skip empty batches: otherwise an empty INSERT is generated, which trips
    # the first non-null constraint.
    if not lst:
        return

    try:
        # Core-level executemany; much faster than going through the ORM, see
        # https://docs.sqlalchemy.org/en/13/faq/performance.html#i-m-inserting-400-000-rows-with-the-orm-and-it-s-really-slow
        engine.execute(kls.__table__.insert(), lst)
        lst.clear()
    except sqlalchemy.exc.IntegrityError as err:
        # Possibly violating some SQL constraint; dump the offending batch.
        print(err)
        print(lst)
        raise
+
def parse_file(filename, session, engine):
    """Parse an ftrace text dump into the database.

    Fills raw_ftrace_entries for every parseable line, plus sched_switches /
    sched_blocked_reasons rows for those two event types. Rows are buffered in
    lists and bulk-inserted every _FLUSH_LIMIT lines for speed.

    Returns the number of non-comment lines consumed.
    """
    global _LIMIT
    global _FLUSH_LIMIT
    count = 0

    pending_entries = []
    pending_sched_switch = []
    pending_sched_blocked_reasons = []

    # Flush all three buffers to their respective tables.
    def insert_all_pending_entries():
        insert_pending_entries(engine, RawFtraceEntry, pending_entries)
        insert_pending_entries(engine, SchedSwitch, pending_sched_switch)
        insert_pending_entries(engine, SchedBlockedReason, pending_sched_blocked_reasons)

    # use explicit encoding to avoid UnicodeDecodeError.
    with open(filename, encoding="ISO-8859-1") as f:
        for l in f:

            # Skip '#'-prefixed header/comment lines of the trace dump.
            if len(l) > 1 and l[0] == '#':
                continue

            count = count + 1

            # Optional debugging cap on how many lines to ingest.
            if _LIMIT and count >= _LIMIT:
                break

            raw_ftrace_entry = RawFtraceEntry.parse_dict(l)
            if not raw_ftrace_entry:
                print("WARNING: Failed to parse raw ftrace entry: " + l)
                continue

            pending_entries.append(raw_ftrace_entry)

            # NOTE(review): 'count' is passed as the child row's id, presumably
            # so it lines up with raw_ftrace_entries' autoincrement id; that
            # only holds if no earlier line failed to parse — confirm.
            if raw_ftrace_entry['function'] == 'sched_switch':
                sched_switch = SchedSwitch.parse_dict(raw_ftrace_entry['function_args'], count)

                if not sched_switch:
                    print("WARNING: Failed to parse sched_switch: " + l)
                else:
                    pending_sched_switch.append(sched_switch)

            elif raw_ftrace_entry['function'] == 'sched_blocked_reason':
                sbr = SchedBlockedReason.parse_dict(raw_ftrace_entry['function_args'], count)

                if not sbr:
                    print("WARNING: Failed to parse sched_blocked_reason: " + l)
                else:
                    pending_sched_blocked_reasons.append(sbr)

            # Objects are cached in python memory, not yet sent to SQL database.
            #session.add(raw_ftrace_entry)

            # Send INSERT/UPDATE/etc statements to the underlying SQL database.
            if count % _FLUSH_LIMIT == 0:
                # session.flush()
                #session.bulk_save_objects(pending_entries)
                #session.bulk_insert_mappings(RawFtraceEntry, pending_entries)
                insert_all_pending_entries()

    # Flush whatever is left in the buffers after the final partial batch.
    insert_all_pending_entries()

    # Ensure underlying database commits changes from memory to disk.
    session.commit()

    return count
+
+#def decode_raw_traces(session, engine):
+# count = 0
+# global _FLUSH_LIMIT
+#
+# for tmw in session.query(RawFtraceEntry).filter_by(function = 'tracing_mark_write'):
+# print(tmw)
+# decoded = TracingMarkWriteFtraceEntry.decode(tmw)
+# session.add(decoded)
+#
+# if count % _FLUSH_LIMIT == 0:
+# session.flush()
+#
+# session.commit()
+#
+# return count
+
def main(argv):
    """Entry point.

    argv: [program_name, db_filename, trace_filename].
    Returns 0 on success, 1 on bad usage.
    """
    # FIX: read from the argv parameter instead of ignoring it for sys.argv,
    # and fail cleanly (instead of IndexError) when arguments are missing.
    if len(argv) < 3:
        print("Usage: %s <db-file> <trace-file>" % argv[0], file=sys.stderr)
        return 1

    db_filename = argv[1]
    trace_filename = argv[2]

    session, engine = init_sqlalchemy(db_filename)
    print("SQL Alchemy db initialized")

    # parse 'raw_ftrace_entries' table
    count = parse_file(trace_filename, session, engine)
    print("Count was ", count)

    # create other tables
#    count = decode_raw_traces(session, engine)

    return 0
+
if __name__ == '__main__':
    # FIX: propagate main()'s return value as the process exit status
    # (previously it was computed and then discarded).
    sys.exit(main(sys.argv))
diff --git a/startop/scripts/trace_analyzer/trace_analyzer_recursive b/startop/scripts/trace_analyzer/trace_analyzer_recursive
new file mode 100755
index 000000000000..4d9ee0eec9b0
--- /dev/null
+++ b/startop/scripts/trace_analyzer/trace_analyzer_recursive
@@ -0,0 +1,78 @@
+#!/bin/bash
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Directory of this script, used to invoke the sibling tools by absolute path.
DIR="$( cd "$(dirname "$0")" ; pwd -P )"

if [[ "$#" -lt 3 ]]; then
  echo "Usage: $0 <trace-dir> <db-dir> <output.csv>" >&2
  exit 1
fi

# Set to "y" to print the commands instead of running them (dry run).
simulate="n"

TRACE_DIRNAME="$1"
SQLITE_DIRNAME="$2"
OUTPUT_FILENAME="$3"

# FIX: these are directories, not filenames; the old labels were misleading.
echo "Trace directory: $TRACE_DIRNAME"
echo "SQLite directory: $SQLITE_DIRNAME"

if ! [[ -d "$TRACE_DIRNAME" ]]; then
  echo "Error: Trace '$TRACE_DIRNAME' does not exist." >&2
  exit 1
fi

# FIX: validate the DB output directory up front instead of failing later
# inside trace_analyzer.py with a less obvious error.
if ! [[ -d "$SQLITE_DIRNAME" ]]; then
  echo "Error: DB dir '$SQLITE_DIRNAME' does not exist." >&2
  exit 1
fi
+
# Analyze a single trace file: build its per-trace DB, then append the SQL
# query results to the combined CSV. Honors the global $simulate dry-run flag.
process_trace_file() {
  local trace_file="$1"
  local db_dir="$2"
  local csv_out="$3"

  local db_file="$db_dir/$(basename "$trace_file").db"

  if [[ $simulate == y ]]; then
    # Dry run: just show the commands that would be executed.
    echo "$DIR/trace_analyzer.py" "$db_file" "$trace_file" "> /dev/null"
    echo "$DIR/run-sql-queries" "$db_file" ">> '$csv_out'"
    return 0
  fi

  if ! "$DIR/trace_analyzer.py" "$db_file" "$trace_file" > /dev/null; then
    echo "Fatal: trace_analyzer.py failed, aborting." >&2
    return 1
  fi

  # append name of trace to CSV, so we can see where data came from
  echo "; $trace_file" >> "$csv_out"
  if ! "$DIR/run-sql-queries" "$db_file" >> "$csv_out"; then
    echo "Fatal: Failed to run sql queries, aborting." >&2
    return 1
  fi

  return 0
}
+
# Process every *.trace under the trace dir (NUL-delimited: safe for any
# filename). For iteration batches named 'foo#N.trace', only '#1' is analyzed.
find "$TRACE_DIRNAME" -type f -name '*.trace' -print0 |
while IFS= read -r -d '' file; do
  if [[ $file == *#*.trace && $file != *#1.trace ]]; then
    echo "Skip $file"
    continue
  fi

  printf '%s\n' "$file"
  # FIX: stop on failure. Previously the exit status of process_trace_file was
  # never checked, so errors were silently swallowed and "Done" still printed.
  process_trace_file "$file" "$SQLITE_DIRNAME" "$OUTPUT_FILENAME" || exit 1
done || exit 1
# (The 'exit 1' inside the loop only exits the pipeline's subshell; the
# trailing '|| exit 1' propagates that failure to the script itself.)

echo "Done"