#!/usr/bin/python
"""Process the output of the power allocator trace in the current
directory's trace.dat"""

import os
import re
from StringIO import StringIO
import pandas as pd
from matplotlib import pyplot as plt

from plot_utils import normalize_title, pre_plot_setup, post_plot_setup

def trace_parser_explode_array(string, array_lengths):
    """Explode an array in the trace into individual elements for easy parsing

    Basically, turn "load={1 1 2 2}" into "load0=1 load1=1 load2=2
    load3=2". array_lengths is a dictionary of array names and their
    expected length. If we get an array that's shorter than the
    expected length, additional keys have to be introduced with value
    0 to compensate. For example, "load={1 2}" with array_lengths
    being {"load": 4} returns "load0=1 load1=2 load2=0 load3=0"

    """

    while True:
        match = re.search(r"[^ ]+={[^}]+}", string)
        if match is None:
            break

        to_explode = match.group()
        col_basename = re.match(r"([^=]+)=", to_explode).groups()[0]
        vals_str = re.search(r"{(.+)}", to_explode).groups()[0]
        vals_array = vals_str.split(' ')

        exploded_str = ""
        for (idx, val) in enumerate(vals_array):
            exploded_str += "{}{}={} ".format(col_basename, idx, val)

        vals_added = len(vals_array)
        if vals_added < array_lengths[col_basename]:
            for idx in range(vals_added, array_lengths[col_basename]):
                exploded_str += "{}{}=0 ".format(col_basename, idx)

        exploded_str = exploded_str[:-1]
        begin_idx = match.start()
        end_idx = match.end()

        string = string[:begin_idx] + exploded_str + string[end_idx:]

    return string

class BaseThermal(object):
    """Base class to parse trace.dat dumps.

    Don't use directly; create a subclass that defines the unique_word
    you want to match in the output"""
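    #
    # A minimal sketch of such a subclass (the event name below is
    # illustrative, not necessarily one the kernel emits):
    #
    #     class ExampleParser(BaseThermal):
    #         def __init__(self, path=None):
    #             super(ExampleParser, self).__init__(
    #                 basepath=path,
    #                 unique_word="example_event:",
    #             )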
    def __init__(self, basepath, unique_word):
        if basepath is None:
            basepath = "."

        self.basepath = basepath
        self.data_csv = ""
        self.data_frame = None
        self.unique_word = unique_word

        if not os.path.isfile(os.path.join(basepath, "trace.txt")):
            self.__run_trace_cmd_report()

    def __run_trace_cmd_report(self):
        """Run "trace-cmd report > trace.txt".

        Overwrites the contents of trace.txt if it exists."""
        from subprocess import check_output

        if not os.path.isfile(os.path.join(self.basepath, "trace.dat")):
            raise IOError("No such file or directory: trace.dat")

        previous_path = os.getcwd()
        os.chdir(self.basepath)

        # This would be better done with a context manager (e.g.
        # http://stackoverflow.com/a/13197763/970766)
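        #
        # A rough sketch of such a helper (not wired in here, just an
        # illustration of the pattern):
        #
        #     from contextlib import contextmanager
        #
        #     @contextmanager
        #     def chdir(path):
        #         previous = os.getcwd()
        #         os.chdir(path)
        #         try:
        #             yield
        #         finally:
        #             os.chdir(previous)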
        try:
            with open(os.devnull, "w") as devnull:
                out = check_output(["trace-cmd", "report"], stderr=devnull)

        finally:
            os.chdir(previous_path)

        with open(os.path.join(self.basepath, "trace.txt"), "w") as fout:
            fout.write(out)

    def get_trace_array_lengths(self, fname):
        """Calculate the lengths of all arrays in the trace

        Returns a dict with the name of each array found in the trace
        as keys and their corresponding length as value
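
        For example, if the longest "load" array in any matching line
        of the trace is "load={1 2 3 4}", the returned dict maps
        "load" to 4.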

        """
        from collections import defaultdict

        pat_array = re.compile(r"([A-Za-z0-9_]+)={([^}]+)}")

        ret = defaultdict(int)

        with open(fname) as fin:
            for line in fin:
                if not re.search(self.unique_word, line):
                    continue

                while True:
                    match = re.search(pat_array, line)
                    if not match:
                        break

                    (array_name, array_elements) = match.groups()

                    array_len = len(array_elements.split(' '))

                    if array_len > ret[array_name]:
                        ret[array_name] = array_len

                    line = line[match.end():]

        return ret

    def parse_into_csv(self):
        """Create a csv representation of the thermal data and store
        it in self.data_csv"""
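
        # As a rough illustration (the event and field names here are
        # made up, not taken from a real trace), a matching line like
        #
        #     "foo-1234  [002]   100.000001: example_event: load={1 2} temp=55000"
        #
        # becomes the csv header "Time,load0,load1,temp" plus the row
        # "100.000001,1,2,55000".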

        fin_fname = os.path.join(self.basepath, "trace.txt")

        array_lengths = self.get_trace_array_lengths(fin_fname)

        pat_timestamp = re.compile(r"([0-9]+\.[0-9]+):")
        pat_data = re.compile(r"[A-Za-z0-9_]+=([^ {]+)")
        pat_header = re.compile(r"([A-Za-z0-9_]+)=[^ ]+")
        pat_empty_array = re.compile(r"[A-Za-z0-9_]+=\{\} ")
        header = ""

        with open(fin_fname) as fin:
            for line in fin:
                if not re.search(self.unique_word, line):
                    continue

                line = line[:-1]

                timestamp_match = re.search(pat_timestamp, line)
                timestamp = timestamp_match.group(1)

                data_start_idx = re.search(r"[A-Za-z0-9_]+=", line).start()
                data_str = line[data_start_idx:]

                # Remove empty arrays from the trace
                data_str = re.sub(pat_empty_array, r"", data_str)

                data_str = trace_parser_explode_array(data_str, array_lengths)

                if not header:
                    header = re.sub(pat_header, r"\1", data_str)
                    header = re.sub(r" ", r",", header)
                    header = "Time," + header + "\n"
                    self.data_csv = header

                parsed_data = re.sub(pat_data, r"\1", data_str)
                # Drop any commas inside the values themselves so they
                # don't end up creating spurious csv columns
                parsed_data = re.sub(r",", r"", parsed_data)
                parsed_data = re.sub(r" ", r",", parsed_data)

                parsed_data = timestamp + "," + parsed_data + "\n"
                self.data_csv += parsed_data

    def get_data_frame(self):
        """Return a pandas data frame for the run"""
        if self.data_frame is not None:
            return self.data_frame

        if not self.data_csv:
            self.parse_into_csv()

        if self.data_csv == "":
            return pd.DataFrame()

        unordered_df = pd.read_csv(StringIO(self.data_csv))
        self.data_frame = unordered_df.set_index("Time")

        return self.data_frame

class Thermal(BaseThermal):
    """Process the thermal framework data in an ftrace dump"""
    def __init__(self, path=None):
        super(Thermal, self).__init__(
            basepath=path,
            unique_word="thermal_temperature:",
        )

    def plot_temperature(self, control_temperature=None, title="", width=None,
                         height=None, ylim="range", ax=None):
        """Plot the temperature.

        If control_temperature is a pd.Series() representing the
        (possible) variation of the control temperature during the
        run, draw it using a dashed yellow line. Otherwise, only the
        temperature is plotted.
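
        A minimal usage sketch (the results path is illustrative):

            thrm = Thermal("/path/to/results")
            thrm.plot_temperature()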

        """
        dfr = self.get_data_frame()
        title = normalize_title("Temperature", title)

        setup_plot = False
        if not ax:
            ax = pre_plot_setup(width, height)
            setup_plot = True

        (dfr["temp"] / 1000).plot(ax=ax)
        if control_temperature is not None:
            control_temperature.plot(ax=ax, color="y", linestyle="--",
                                     label="control temperature")

        if setup_plot:
            post_plot_setup(ax, title=title, ylim=ylim)
            plt.legend()

class ThermalGovernor(BaseThermal):
    """Process the power allocator data in an ftrace dump"""
    def __init__(self, path=None):
        super(ThermalGovernor, self).__init__(
            basepath=path,
            unique_word="thermal_power_allocator:",
        )

    def write_thermal_csv(self):
        """Write the csv info in thermal.csv"""
        if not self.data_csv:
            self.parse_into_csv()

        with open("thermal.csv", "w") as fout:
            fout.write(self.data_csv)

    def plot_input_power(self, actor_order, title="", width=None, height=None):
        """Plot input power

        actor_order is an array with the order in which the actors were registered.
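
        For example (the actor names are illustrative):

            gov = ThermalGovernor("/path/to/results")
            gov.plot_input_power(["A15", "A7", "GPU"])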
243 """
244
Javi Merinoe0ddf0d2014-05-07 18:40:12 +0100245 dfr = self.get_data_frame()
Javi Merinof7968a72014-07-03 15:35:02 +0100246 in_cols = [s for s in dfr.columns if re.match("req_power[0-9]+", s)]
Javi Merinoe0ddf0d2014-05-07 18:40:12 +0100247
Javi Merinod6d5f892014-07-03 16:24:23 +0100248 plot_dfr = dfr[in_cols]
249 # Rename the columns from "req_power0" to "A15" or whatever is
250 # in actor_order. Note that we can do it just with an
251 # assignment because the columns are already sorted (i.e.:
252 # req_power0, req_power1...)
253 plot_dfr.columns = actor_order
254
Javi Merinoc00feff2014-04-14 15:41:51 +0100255 title = normalize_title("Input Power", title)
Javi Merino8ecd8172014-07-03 16:09:01 +0100256
257 ax = pre_plot_setup(width, height)
Javi Merinod6d5f892014-07-03 16:24:23 +0100258 plot_dfr.plot(ax=ax)
Javi Merino8ecd8172014-07-03 16:09:01 +0100259 post_plot_setup(ax, title=title)
Javi Merino9c010772014-04-02 16:54:41 +0100260
    def plot_output_power(self, actor_order, title="", width=None, height=None):
        """Plot output power

        actor_order is an array with the order in which the actors were registered.
        """

        dfr = self.get_data_frame()
        out_cols = [s for s in dfr.columns
                    if re.match("granted_power[0-9]+", s)]

        # See the note in plot_input_power()
        plot_dfr = dfr[out_cols]
        plot_dfr.columns = actor_order

        title = normalize_title("Output Power", title)

        ax = pre_plot_setup(width, height)
        plot_dfr.plot(ax=ax)
        post_plot_setup(ax, title=title)

    def plot_inout_power(self, title="", width=None, height=None):
        """Make multiple plots showing input and output power for each actor"""
        dfr = self.get_data_frame()

        actors = []
        for col in dfr.columns:
            match = re.match("P(.*)_in", col)
            if match and col != "Ptot_in":
                actors.append(match.group(1))

        for actor in actors:
            cols = ["P" + actor + "_in", "P" + actor + "_out"]
            this_title = normalize_title(actor, title)
            dfr[cols].plot(title=this_title)