#!/usr/bin/python
"""Process the output of the power allocator trace in the current
directory's trace.dat"""

import os
import re
from StringIO import StringIO
import pandas as pd
from matplotlib import pyplot as plt

from plot_utils import normalize_title, pre_plot_setup, post_plot_setup

def trace_parser_explode_array(string, array_lengths):
    """Explode an array in the trace into individual elements for easy parsing

    Basically, turn "load={1 1 2 2}" into "load0=1 load1=1 load2=2
    load3=2". array_lengths is a dictionary of array names and their
    expected lengths. If we get an array that's shorter than the
    expected length, additional keys have to be introduced with value
    0 to compensate. For example, "load={1 2}" with array_lengths
    being {"load": 4} returns "load0=1 load1=2 load2=0 load3=0"

    """

    while True:
        match = re.search(r"[^ ]+={[^}]+}", string)
        if match is None:
            break

        to_explode = match.group()
        col_basename = re.match(r"([^=]+)=", to_explode).groups()[0]
        vals_str = re.search(r"{(.+)}", to_explode).groups()[0]
        vals_array = vals_str.split(' ')

        exploded_str = ""
        for (idx, val) in enumerate(vals_array):
            exploded_str += "{}{}={} ".format(col_basename, idx, val)

        vals_added = len(vals_array)
        if vals_added < array_lengths[col_basename]:
            for idx in range(vals_added, array_lengths[col_basename]):
                exploded_str += "{}{}=0 ".format(col_basename, idx)

        exploded_str = exploded_str[:-1]
        begin_idx = match.start()
        end_idx = match.end()

        string = string[:begin_idx] + exploded_str + string[end_idx:]

    return string
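
# A minimal sketch of the transformation above, with hypothetical trace
# content (not taken from a real run):
#
#   >>> trace_parser_explode_array("load={1 2} power=4", {"load": 4})
#   'load0=1 load1=2 load2=0 load3=0 power=4'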

class BaseThermal(object):
    """Base class to parse trace.dat dumps.

    Don't use this class directly; create a subclass that defines the
    unique_word you want to match in the output"""
    def __init__(self, basepath, unique_word):
        if basepath is None:
            basepath = "."

        self.basepath = basepath
        self.data_csv = ""
        self.data_frame = None
        self.unique_word = unique_word

        if not os.path.isfile(os.path.join(basepath, "trace.txt")):
            self.__run_trace_cmd_report()

        self.__parse_into_csv()

    def __run_trace_cmd_report(self):
        """Run "trace-cmd report > trace.txt".

        Overwrites the contents of trace.txt if it exists."""
        from subprocess import check_output

        if not os.path.isfile(os.path.join(self.basepath, "trace.dat")):
            raise IOError("No such file or directory: trace.dat")

        previous_path = os.getcwd()
        os.chdir(self.basepath)

        # This would be better done with a context manager (e.g.
        # http://stackoverflow.com/a/13197763/970766)
        try:
            with open(os.devnull, "w") as devnull:
                out = check_output(["trace-cmd", "report"], stderr=devnull)

        finally:
            os.chdir(previous_path)

        with open(os.path.join(self.basepath, "trace.txt"), "w") as fout:
            fout.write(out)

    def get_trace_array_lengths(self, fname):
        """Calculate the lengths of all arrays in the trace

        Returns a dict with the name of each array found in the trace
        as keys and their corresponding lengths as values

        """
        from collections import defaultdict

        pat_array = re.compile(r"([A-Za-z0-9_]+)={([^}]+)}")

        ret = defaultdict(int)

        with open(fname) as fin:
            for line in fin:
                if not re.search(self.unique_word, line):
                    continue

                while True:
                    match = re.search(pat_array, line)
                    if not match:
                        break

                    (array_name, array_elements) = match.groups()

                    array_len = len(array_elements.split(' '))

                    if array_len > ret[array_name]:
                        ret[array_name] = array_len

                    line = line[match.end():]

        return ret
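
    # Rough illustration of what get_trace_array_lengths() returns, assuming
    # a hypothetical trace line such as
    #   "... thermal_power_allocator: ... load={10 20 30 40} freq=500000":
    # the result would be {"load": 4}, one entry per array found in lines
    # matching unique_word, keeping the longest length seen.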

    def __parse_into_csv(self):
        """Create a csv representation of the thermal data and store
        it in self.data_csv"""

        fin_fname = os.path.join(self.basepath, "trace.txt")

        array_lengths = self.get_trace_array_lengths(fin_fname)

        pat_timestamp = re.compile(r"([0-9]+\.[0-9]+):")
        pat_data = re.compile(r"[A-Za-z0-9_]+=([^ {]+)")
        pat_header = re.compile(r"([A-Za-z0-9_]+)=[^ ]+")
        pat_empty_array = re.compile(r"[A-Za-z0-9_]+=\{\} ")
        header = ""

        with open(fin_fname) as fin:
            for line in fin:
                if not re.search(self.unique_word, line):
                    continue

                line = line[:-1]

                timestamp_match = re.search(pat_timestamp, line)
                timestamp = timestamp_match.group(1)

                data_start_idx = re.search(r"[A-Za-z0-9_]+=", line).start()
                data_str = line[data_start_idx:]

                # Remove empty arrays from the trace
                data_str = re.sub(pat_empty_array, r"", data_str)

                data_str = trace_parser_explode_array(data_str, array_lengths)

                if not header:
                    header = re.sub(pat_header, r"\1", data_str)
                    header = re.sub(r" ", r",", header)
                    header = "Time," + header + "\n"
                    self.data_csv = header

                parsed_data = re.sub(pat_data, r"\1", data_str)
                parsed_data = re.sub(r",", r"", parsed_data)
                parsed_data = re.sub(r" ", r",", parsed_data)

                parsed_data = timestamp + "," + parsed_data + "\n"
                self.data_csv += parsed_data
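
        # As an illustration (hypothetical trace line, not from a real run),
        #   "... 123.456789: thermal_temperature: thermal_zone=soc id=0 temp_prev=52000 temp=53000"
        # would produce the header "Time,thermal_zone,id,temp_prev,temp" and
        # the row "123.456789,soc,0,52000,53000".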

    def get_data_frame(self):
        """Return a pandas data frame for the run"""
        if self.data_frame is not None:
            return self.data_frame

        if self.data_csv == "":
            return pd.DataFrame()

        unordered_df = pd.read_csv(StringIO(self.data_csv))
        self.data_frame = unordered_df.set_index("Time")

        return self.data_frame

class Thermal(BaseThermal):
    """Process the thermal framework data in an ftrace dump"""
    def __init__(self, path=None):
        super(Thermal, self).__init__(
            basepath=path,
            unique_word="thermal_temperature:",
        )

    def plot_temperature(self, control_temperature=None, title="", width=None,
                         height=None, ylim="range", ax=None, legend_label=""):
        """Plot the temperature.

        If control_temperature is a pd.Series() representing the
        (possible) variation of the control temperature during the
        run, draw it using a dashed yellow line. Otherwise, only the
        temperature is plotted.

        """
        dfr = self.get_data_frame()
        title = normalize_title("Temperature", title)

        setup_plot = False
        if not ax:
            ax = pre_plot_setup(width, height)
            setup_plot = True

        temp_label = normalize_title("Temperature", legend_label)
        (dfr["temp"] / 1000).plot(ax=ax, label=temp_label)
        if control_temperature is not None:
            ct_label = normalize_title("Control", legend_label)
            control_temperature.plot(ax=ax, color="y", linestyle="--",
                                     label=ct_label)

        if setup_plot:
            post_plot_setup(ax, title=title, ylim=ylim)
            plt.legend()
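
    # A minimal usage sketch (the path and the control temperature series are
    # hypothetical; the series is expected to be in the same units as the
    # plotted temperature, i.e. degrees Celsius):
    #
    #   thrm = Thermal("/path/to/results")
    #   ctl = pd.Series([55.0, 55.0], index=[0.0, 10.0])
    #   thrm.plot_temperature(control_temperature=ctl, title="my run")
    #   plt.show()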

class ThermalGovernor(BaseThermal):
    """Process the power allocator data in an ftrace dump"""
    def __init__(self, path=None):
        super(ThermalGovernor, self).__init__(
            basepath=path,
            unique_word="thermal_power_allocator:",
        )

    def write_thermal_csv(self):
        """Write the csv info to thermal.csv"""
        with open("thermal.csv", "w") as fout:
            fout.write(self.data_csv)

    def plot_input_power(self, actor_order, title="", width=None, height=None):
        """Plot input power

        actor_order is an array with the order in which the actors
        were registered.
        """

        dfr = self.get_data_frame()
        in_cols = [s for s in dfr.columns if re.match("req_power[0-9]+", s)]

        plot_dfr = dfr[in_cols]
        # Rename the columns from "req_power0" to "A15" or whatever is
        # in actor_order. Note that we can do it just with an
        # assignment because the columns are already sorted (i.e.
        # req_power0, req_power1...)
        plot_dfr.columns = actor_order

        title = normalize_title("Input Power", title)

        ax = pre_plot_setup(width, height)
        plot_dfr.plot(ax=ax)
        post_plot_setup(ax, title=title)
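
    # e.g. gov.plot_input_power(["A15", "A7", "GPU"]), assuming
    # (hypothetically) that those three actors were registered in that order.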

    def plot_output_power(self, actor_order, title="", width=None, height=None):
        """Plot output power

        actor_order is an array with the order in which the actors
        were registered.
        """

        dfr = self.get_data_frame()
        out_cols = [s for s in dfr.columns
                    if re.match("granted_power[0-9]+", s)]

        # See the note in plot_input_power()
        plot_dfr = dfr[out_cols]
        plot_dfr.columns = actor_order

        title = normalize_title("Output Power", title)

        ax = pre_plot_setup(width, height)
        plot_dfr.plot(ax=ax)
        post_plot_setup(ax, title=title)

    def plot_inout_power(self, title="", width=None, height=None):
        """Make multiple plots showing input and output power for each actor"""
        dfr = self.get_data_frame()

        actors = []
        for col in dfr.columns:
            match = re.match("P(.*)_in", col)
            if match and col != "Ptot_in":
                actors.append(match.group(1))

        for actor in actors:
            cols = ["P" + actor + "_in", "P" + actor + "_out"]
            this_title = normalize_title(actor, title)
            dfr[cols].plot(title=this_title)
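
# A minimal end-to-end sketch of how this module is meant to be used,
# assuming a trace.dat with thermal_temperature and thermal_power_allocator
# events in the current directory (the actor names are hypothetical):
#
#   thrm = Thermal()
#   gov = ThermalGovernor()
#   gov.write_thermal_csv()
#   thrm.plot_temperature()
#   gov.plot_input_power(["A15", "A7", "GPU"])
#   gov.plot_output_power(["A15", "A7", "GPU"])
#   plt.show()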