// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
| 14 | |
| 15 | import '../tracks/all_controller'; |
| 16 | |
| 17 | import {assertExists, assertTrue} from '../base/logging'; |
| 18 | import { |
| 19 | Action, |
| 20 | addChromeSliceTrack, |
| 21 | addTrack, |
| 22 | navigate, |
| 23 | setEngineReady, |
| 24 | setTraceTime, |
| 25 | setVisibleTraceTime, |
| 26 | updateStatus |
| 27 | } from '../common/actions'; |
| 28 | import {TimeSpan} from '../common/time'; |
| 29 | import {QuantizedLoad, ThreadDesc} from '../frontend/globals'; |
| 30 | import {SLICE_TRACK_KIND} from '../tracks/chrome_slices/common'; |
| 31 | import {CPU_SLICE_TRACK_KIND} from '../tracks/cpu_slices/common'; |
| 32 | |
| 33 | import {Child, Children, Controller} from './controller'; |
| 34 | import {Engine} from './engine'; |
| 35 | import {globals} from './globals'; |
| 36 | import {QueryController, QueryControllerArgs} from './query_controller'; |
| 37 | import {TrackControllerArgs, trackControllerRegistry} from './track_controller'; |
| 38 | |
// Lifecycle states of the TraceController state machine; see run() below.
type States = 'init'|'loading_trace'|'ready';


// Ambient declaration for FileReaderSync, used to read a local trace File in
// synchronous slices. NOTE(review): presumably declared here because the
// default TS lib in use does not expose it (it is a worker-only API) — the
// two declarations below provide the instance type and the constructor.
declare interface FileReaderSync { readAsArrayBuffer(blob: Blob): ArrayBuffer; }

declare var FileReaderSync:
    {prototype: FileReaderSync; new (): FileReaderSync;};
| 46 | |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 47 | // TraceController handles handshakes with the frontend for everything that |
| 48 | // concerns a single trace. It owns the WASM trace processor engine, handles |
| 49 | // tracks data and SQL queries. There is one TraceController instance for each |
| 50 | // trace opened in the UI (for now only one trace is supported). |
| 51 | export class TraceController extends Controller<States> { |
| 52 | private readonly engineId: string; |
| 53 | private engine?: Engine; |
| 54 | |
| 55 | constructor(engineId: string) { |
| 56 | super('init'); |
| 57 | this.engineId = engineId; |
| 58 | } |
| 59 | |
Primiano Tucci | 7e33029 | 2018-08-24 19:10:52 +0200 | [diff] [blame] | 60 | onDestroy() { |
| 61 | if (this.engine !== undefined) globals.destroyEngine(this.engine.id); |
| 62 | } |
| 63 | |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 64 | run() { |
| 65 | const engineCfg = assertExists(globals.state.engines[this.engineId]); |
| 66 | switch (this.state) { |
| 67 | case 'init': |
| 68 | globals.dispatch(setEngineReady(this.engineId, false)); |
| 69 | this.loadTrace().then(() => { |
| 70 | globals.dispatch(setEngineReady(this.engineId, true)); |
| 71 | }); |
| 72 | globals.dispatch(updateStatus('Opening trace')); |
| 73 | this.setState('loading_trace'); |
| 74 | break; |
| 75 | |
| 76 | case 'loading_trace': |
| 77 | // Stay in this state until loadTrace() returns and marks the engine as |
| 78 | // ready. |
| 79 | if (this.engine === undefined || !engineCfg.ready) return; |
| 80 | this.setState('ready'); |
| 81 | break; |
| 82 | |
| 83 | case 'ready': |
| 84 | // At this point we are ready to serve queries and handle tracks. |
| 85 | const engine = assertExists(this.engine); |
| 86 | assertTrue(engineCfg.ready); |
| 87 | const childControllers: Children = []; |
| 88 | |
| 89 | // Create a TrackController for each track. |
| 90 | for (const trackId of Object.keys(globals.state.tracks)) { |
| 91 | const trackCfg = globals.state.tracks[trackId]; |
| 92 | if (trackCfg.engineId !== this.engineId) continue; |
| 93 | if (!trackControllerRegistry.has(trackCfg.kind)) continue; |
| 94 | const trackCtlFactory = trackControllerRegistry.get(trackCfg.kind); |
| 95 | const trackArgs: TrackControllerArgs = {trackId, engine}; |
| 96 | childControllers.push(Child(trackId, trackCtlFactory, trackArgs)); |
| 97 | } |
| 98 | |
| 99 | // Create a QueryController for each query. |
| 100 | for (const queryId of Object.keys(globals.state.queries)) { |
| 101 | const queryArgs: QueryControllerArgs = {queryId, engine}; |
| 102 | childControllers.push(Child(queryId, QueryController, queryArgs)); |
| 103 | } |
| 104 | |
| 105 | return childControllers; |
| 106 | |
| 107 | default: |
| 108 | throw new Error(`unknown state ${this.state}`); |
| 109 | } |
| 110 | return; |
| 111 | } |
| 112 | |
| 113 | private async loadTrace() { |
Primiano Tucci | 7e33029 | 2018-08-24 19:10:52 +0200 | [diff] [blame] | 114 | globals.dispatch(updateStatus('Creating trace processor')); |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 115 | const engineCfg = assertExists(globals.state.engines[this.engineId]); |
Primiano Tucci | 7e33029 | 2018-08-24 19:10:52 +0200 | [diff] [blame] | 116 | this.engine = await globals.createEngine(); |
| 117 | |
| 118 | const statusHeader = 'Opening trace'; |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 119 | if (engineCfg.source instanceof File) { |
Primiano Tucci | 7e33029 | 2018-08-24 19:10:52 +0200 | [diff] [blame] | 120 | const blob = engineCfg.source as Blob; |
| 121 | const reader = new FileReaderSync(); |
| 122 | const SLICE_SIZE = 1024 * 1024; |
| 123 | for (let off = 0; off < blob.size; off += SLICE_SIZE) { |
| 124 | const slice = blob.slice(off, off + SLICE_SIZE); |
| 125 | const arrBuf = reader.readAsArrayBuffer(slice); |
| 126 | await this.engine.parse(new Uint8Array(arrBuf)); |
| 127 | const progress = Math.round((off + slice.size) / blob.size * 100); |
| 128 | globals.dispatch(updateStatus(`${statusHeader} ${progress} %`)); |
| 129 | } |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 130 | } else { |
Primiano Tucci | 7e33029 | 2018-08-24 19:10:52 +0200 | [diff] [blame] | 131 | const resp = await fetch(engineCfg.source); |
| 132 | if (resp.status !== 200) { |
| 133 | globals.dispatch(updateStatus(`HTTP error ${resp.status}`)); |
| 134 | throw new Error(`fetch() failed with HTTP error ${resp.status}`); |
| 135 | } |
| 136 | // tslint:disable-next-line no-any |
| 137 | const rd = (resp.body as any).getReader() as ReadableStreamReader; |
| 138 | const tStartMs = performance.now(); |
| 139 | let tLastUpdateMs = 0; |
| 140 | for (let off = 0;;) { |
| 141 | const readRes = await rd.read() as {value: Uint8Array, done: boolean}; |
| 142 | if (readRes.value !== undefined) { |
| 143 | off += readRes.value.length; |
| 144 | await this.engine.parse(readRes.value); |
| 145 | } |
| 146 | // For traces loaded from the network there doesn't seem to be a |
| 147 | // reliable way to compute the %. The content-length exposed by GCS is |
| 148 | // before compression (which is handled transparently by the browser). |
| 149 | const nowMs = performance.now(); |
| 150 | if (nowMs - tLastUpdateMs > 100) { |
| 151 | tLastUpdateMs = nowMs; |
| 152 | const mb = off / 1e6; |
| 153 | const tElapsed = (nowMs - tStartMs) / 1e3; |
| 154 | let status = `${statusHeader} ${mb.toFixed(1)} MB `; |
| 155 | status += `(${(mb / tElapsed).toFixed(1)} MB/s)`; |
| 156 | globals.dispatch(updateStatus(status)); |
| 157 | } |
| 158 | if (readRes.done) break; |
| 159 | } |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 160 | } |
| 161 | |
Hector Dearman | 1d28921 | 2018-09-05 14:05:29 +0100 | [diff] [blame^] | 162 | await this.engine.notifyEof(); |
| 163 | |
Primiano Tucci | e36ca63 | 2018-08-21 14:32:23 +0200 | [diff] [blame] | 164 | const traceTime = await this.engine.getTraceTimeBounds(); |
| 165 | const actions = [ |
| 166 | setTraceTime(traceTime), |
| 167 | navigate('/viewer'), |
| 168 | ]; |
| 169 | |
| 170 | if (globals.state.visibleTraceTime.lastUpdate === 0) { |
| 171 | actions.push(setVisibleTraceTime(traceTime)); |
| 172 | } |
| 173 | |
| 174 | globals.dispatchMultiple(actions); |
| 175 | |
| 176 | await this.listTracks(); |
| 177 | await this.listThreads(); |
| 178 | await this.loadTimelineOverview(traceTime); |
| 179 | } |
| 180 | |
| 181 | private async listTracks() { |
| 182 | globals.dispatch(updateStatus('Loading tracks')); |
| 183 | const engine = assertExists<Engine>(this.engine); |
| 184 | const addToTrackActions: Action[] = []; |
| 185 | const numCpus = await engine.getNumberOfCpus(); |
| 186 | for (let cpu = 0; cpu < numCpus; cpu++) { |
| 187 | addToTrackActions.push( |
| 188 | addTrack(this.engineId, CPU_SLICE_TRACK_KIND, cpu)); |
| 189 | } |
| 190 | |
| 191 | const threadQuery = await engine.rawQuery({ |
| 192 | sqlQuery: 'select upid, utid, tid, thread.name, max(slices.depth) ' + |
| 193 | 'from thread inner join slices using(utid) group by utid' |
| 194 | }); |
| 195 | for (let i = 0; i < threadQuery.numRecords; i++) { |
| 196 | const upid = threadQuery.columns[0].longValues![i]; |
| 197 | const utid = threadQuery.columns[1].longValues![i]; |
| 198 | const threadId = threadQuery.columns[2].longValues![i]; |
| 199 | let threadName = threadQuery.columns[3].stringValues![i]; |
| 200 | threadName += `[${threadId}]`; |
| 201 | const maxDepth = threadQuery.columns[4].longValues![i]; |
| 202 | addToTrackActions.push(addChromeSliceTrack( |
| 203 | this.engineId, |
| 204 | SLICE_TRACK_KIND, |
| 205 | upid as number, |
| 206 | utid as number, |
| 207 | threadName, |
| 208 | maxDepth as number)); |
| 209 | } |
| 210 | globals.dispatchMultiple(addToTrackActions); |
| 211 | } |
| 212 | |
| 213 | private async listThreads() { |
| 214 | globals.dispatch(updateStatus('Reading thread list')); |
| 215 | const sqlQuery = 'select utid, tid, pid, thread.name, process.name ' + |
| 216 | 'from thread inner join process using(upid)'; |
| 217 | const threadRows = await assertExists(this.engine).rawQuery({sqlQuery}); |
| 218 | const threads: ThreadDesc[] = []; |
| 219 | for (let i = 0; i < threadRows.numRecords; i++) { |
| 220 | const utid = threadRows.columns[0].longValues![i] as number; |
| 221 | const tid = threadRows.columns[1].longValues![i] as number; |
| 222 | const pid = threadRows.columns[2].longValues![i] as number; |
| 223 | const threadName = threadRows.columns[3].stringValues![i]; |
| 224 | const procName = threadRows.columns[4].stringValues![i]; |
| 225 | threads.push({utid, tid, threadName, pid, procName}); |
| 226 | } // for (record ...) |
| 227 | globals.publish('Threads', threads); |
| 228 | } |
| 229 | |
| 230 | private async loadTimelineOverview(traceTime: TimeSpan) { |
| 231 | const engine = assertExists<Engine>(this.engine); |
| 232 | const numSteps = 100; |
| 233 | const stepSec = traceTime.duration / numSteps; |
| 234 | for (let step = 0; step < numSteps; step++) { |
| 235 | globals.dispatch(updateStatus( |
| 236 | 'Loading overview ' + |
| 237 | `${Math.round((step + 1) / numSteps * 1000) / 10}%`)); |
| 238 | const startSec = traceTime.start + step * stepSec; |
| 239 | const startNs = Math.floor(startSec * 1e9); |
| 240 | const endSec = startSec + stepSec; |
| 241 | const endNs = Math.ceil(endSec * 1e9); |
| 242 | |
| 243 | // Sched overview. |
| 244 | const schedRows = await engine.rawQuery({ |
| 245 | sqlQuery: `select sum(dur)/${stepSec}/1e9, cpu from sched ` + |
| 246 | `where ts >= ${startNs} and ts < ${endNs} ` + |
| 247 | 'group by cpu order by cpu' |
| 248 | }); |
| 249 | const schedData: {[key: string]: QuantizedLoad} = {}; |
| 250 | for (let i = 0; i < schedRows.numRecords; i++) { |
| 251 | const load = schedRows.columns[0].doubleValues![i]; |
| 252 | const cpu = schedRows.columns[1].longValues![i] as number; |
| 253 | schedData[cpu] = {startSec, endSec, load}; |
| 254 | } // for (record ...) |
| 255 | globals.publish('OverviewData', schedData); |
| 256 | |
| 257 | // Slices overview. |
| 258 | const slicesRows = await engine.rawQuery({ |
| 259 | sqlQuery: |
| 260 | `select sum(dur)/${stepSec}/1e9, process.name, process.pid, upid ` + |
| 261 | 'from slices inner join thread using(utid) ' + |
| 262 | 'inner join process using(upid) where depth = 0 ' + |
| 263 | `and ts >= ${startNs} and ts < ${endNs} ` + |
| 264 | 'group by upid' |
| 265 | }); |
| 266 | const slicesData: {[key: string]: QuantizedLoad} = {}; |
| 267 | for (let i = 0; i < slicesRows.numRecords; i++) { |
| 268 | const load = slicesRows.columns[0].doubleValues![i]; |
| 269 | let procName = slicesRows.columns[1].stringValues![i]; |
| 270 | const pid = slicesRows.columns[2].longValues![i]; |
| 271 | procName += ` [${pid}]`; |
| 272 | slicesData[procName] = {startSec, endSec, load}; |
| 273 | } |
| 274 | globals.publish('OverviewData', slicesData); |
| 275 | } // for (step ...) |
| 276 | } |
| 277 | } |