// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "perf_parser.h"

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <memory>
#include <set>
#include <sstream>

#include "base/logging.h"

#include "address_mapper.h"
#include "binary_data_utils.h"
#include "compat/proto.h"
#include "compat/string.h"
#include "dso.h"
#include "huge_page_deducer.h"

namespace quipper {

using BranchStackEntry = PerfDataProto_BranchStackEntry;
using CommEvent = PerfDataProto_CommEvent;
using ForkEvent = PerfDataProto_ForkEvent;
using MMapEvent = PerfDataProto_MMapEvent;
using SampleEvent = PerfDataProto_SampleEvent;

namespace {

// MMAPs are aligned to pages of this many bytes.
const uint64_t kMmapPageAlignment = sysconf(_SC_PAGESIZE);

// Name and ID of the kernel swapper process.
const char kSwapperCommandName[] = "swapper";
const uint32_t kSwapperPid = 0;

// Returns the offset within a page of size |kMmapPageAlignment|, given an
// address. Requires that |kMmapPageAlignment| be a power of 2.
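// For example, assuming 4 KiB pages (kMmapPageAlignment == 0x1000),
// GetPageAlignedOffset(0x7f1234567abc) == 0xabc.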
uint64_t GetPageAlignedOffset(uint64_t addr) {
  return addr % kMmapPageAlignment;
}

bool IsNullBranchStackEntry(const BranchStackEntry& entry) {
  return (!entry.from_ip() && !entry.to_ip());
}

}  // namespace

PerfParser::PerfParser(PerfReader* reader) : reader_(reader) {}

PerfParser::~PerfParser() {}

PerfParser::PerfParser(PerfReader* reader, const PerfParserOptions& options)
    : reader_(reader), options_(options) {}

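// Top-level entry point. Optionally sorts the events owned by |reader_| by
// time, deduces huge-page mappings and combines split mappings (per
// |options_|), processes all events, and, if |options_.discard_unused_events|
// is set, drops MMAP/MMAP2 events whose regions received no samples.
//
// A minimal usage sketch (assuming |reader| has already been populated from a
// perf data file through PerfReader's own reading interface):
//
//   PerfParserOptions options;
//   options.do_remap = true;
//   PerfParser parser(&reader, options);
//   if (!parser.ParseRawEvents()) {
//     LOG(ERROR) << "Could not parse perf events.";
//   }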
bool PerfParser::ParseRawEvents() {
  if (options_.sort_events_by_time) {
    reader_->MaybeSortEventsByTime();
  }

  // Just in case there was data from a previous call.
  process_mappers_.clear();

  // Find huge page mappings.
  if (options_.deduce_huge_page_mappings) {
    DeduceHugePages(reader_->mutable_events());
  }

  // Combine split mappings. Because the remapping process makes addresses
  // contiguous, we cannot combine mappings when remapping is enabled, as that
  // would collapse maps that were originally non-contiguous.
  if (options_.combine_mappings && !options_.do_remap) {
    CombineMappings(reader_->mutable_events());
  }

  // Clear the parsed events to reset their fields. Otherwise, non-sample
  // events may have residual DSO+offset info.
  parsed_events_.clear();

  // Events of type PERF_RECORD_FINISHED_ROUND don't have a timestamp and are
  // not needed, so they are dropped here. (The partial sorting of events
  // between rounds could be used to sort faster.)
  parsed_events_.resize(reader_->events().size());
  size_t write_index = 0;
  for (int i = 0; i < reader_->events().size(); ++i) {
    if (reader_->events().Get(i).header().type() == PERF_RECORD_FINISHED_ROUND)
      continue;
    parsed_events_[write_index++].event_ptr =
        reader_->mutable_events()->Mutable(i);
  }
  parsed_events_.resize(write_index);

  ProcessEvents();

  if (!options_.discard_unused_events) return true;

  // Some MMAP/MMAP2 events' mapped regions will not have any samples. These
  // MMAP/MMAP2 events should be dropped. |parsed_events_| should be
  // reconstructed without these events.
  write_index = 0;
  size_t read_index;
  for (read_index = 0; read_index < parsed_events_.size(); ++read_index) {
    const ParsedEvent& event = parsed_events_[read_index];
    if (event.event_ptr->has_mmap_event() &&
        event.num_samples_in_mmap_region == 0) {
      continue;
    }
    if (read_index != write_index) parsed_events_[write_index] = event;
    ++write_index;
  }
  CHECK_LE(write_index, parsed_events_.size());
  parsed_events_.resize(write_index);

  // Update the events in |reader_| to match the updated events.
  UpdatePerfEventsFromParsedEvents();

  return true;
}

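// Handles events in the user (perf-synthesized) type range, i.e. types >=
// PERF_RECORD_USER_TYPE_START. These are currently only logged; none of them
// change the parser state.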
bool PerfParser::ProcessUserEvents(PerfEvent& event) {
  // New user events introduced in perf 4.13 are not yet supported.
  switch (event.header().type()) {
    case PERF_RECORD_AUXTRACE:
      VLOG(1) << "Parsed event type: " << event.header().type()
              << ". Doing nothing.";
      break;
    default:
      VLOG(1) << "Unsupported event type: " << event.header().type();
      break;
  }
  return true;
}

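// Walks |parsed_events_| once, dispatching on event type: MMAP/MMAP2, COMM and
// FORK events update the process and address-mapping state, SAMPLE events are
// mapped to a DSO and offset, and other known types are only counted or
// logged. Fails on an unknown event type or if the fraction of mapped samples
// falls below |options_.sample_mapping_percentage_threshold|.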
bool PerfParser::ProcessEvents() {
  stats_ = {0};

  stats_.did_remap = false;  // Explicitly clear the remap flag.

  // Pid 0 is called the swapper process. Even though perf does not record a
  // COMM event for pid 0, we act as though we had received one. Perf itself
  // does the same; see:
  // http://lxr.free-electrons.com/source/tools/perf/util/session.c#L1120
  commands_.insert(kSwapperCommandName);
  pidtid_to_comm_map_[std::make_pair(kSwapperPid, kSwapperPid)] =
      &(*commands_.find(kSwapperCommandName));

  // NB: Not necessarily actually sorted by time.
  for (size_t i = 0; i < parsed_events_.size(); ++i) {
    ParsedEvent& parsed_event = parsed_events_[i];
    PerfEvent& event = *parsed_event.event_ptr;

    // Process user events
    if (event.header().type() >= PERF_RECORD_USER_TYPE_START) {
      if (!ProcessUserEvents(event)) {
        return false;
      }
      continue;
    }

    switch (event.header().type()) {
      case PERF_RECORD_SAMPLE:
        // SAMPLE doesn't have any fields to log at a fixed,
        // previously-endian-swapped location. This used to log ip.
        VLOG(1) << "SAMPLE";
        ++stats_.num_sample_events;
        if (MapSampleEvent(&parsed_event)) ++stats_.num_sample_events_mapped;
        break;
      case PERF_RECORD_MMAP:
      case PERF_RECORD_MMAP2: {
        const char* mmap_type_name =
            event.header().type() == PERF_RECORD_MMAP ? "MMAP" : "MMAP2";
        VLOG(1) << mmap_type_name << ": " << event.mmap_event().filename();
        ++stats_.num_mmap_events;
        // Use the array index of the current mmap event as a unique
        // identifier.
        CHECK(MapMmapEvent(event.mutable_mmap_event(), i))
            << "Unable to map " << mmap_type_name << " event!";
        // No samples in this MMAP region yet, hopefully.
        parsed_event.num_samples_in_mmap_region = 0;
        DSOInfo dso_info;
        dso_info.name = event.mmap_event().filename();
        if (event.header().type() == PERF_RECORD_MMAP2) {
          dso_info.maj = event.mmap_event().maj();
          dso_info.min = event.mmap_event().min();
          dso_info.ino = event.mmap_event().ino();
        }
        name_to_dso_.emplace(dso_info.name, dso_info);
        break;
      }
      case PERF_RECORD_FORK:
        // clang-format off
        VLOG(1) << "FORK: " << event.fork_event().ppid()
                << ":" << event.fork_event().ptid()
                << " -> " << event.fork_event().pid()
                << ":" << event.fork_event().tid();
        // clang-format on
        ++stats_.num_fork_events;
        CHECK(MapForkEvent(event.fork_event())) << "Unable to map FORK event!";
        break;
      case PERF_RECORD_EXIT:
        // EXIT events have the same structure as FORK events.
        // clang-format off
        VLOG(1) << "EXIT: " << event.fork_event().ppid()
                << ":" << event.fork_event().ptid();
        // clang-format on
        ++stats_.num_exit_events;
        break;
      case PERF_RECORD_COMM: {
        // clang-format off
        VLOG(1) << "COMM: " << event.comm_event().pid()
                << ":" << event.comm_event().tid() << ": "
                << event.comm_event().comm();
        // clang-format on
        ++stats_.num_comm_events;
        CHECK(MapCommEvent(event.comm_event()));
        commands_.insert(event.comm_event().comm());
        const PidTid pidtid =
            std::make_pair(event.comm_event().pid(), event.comm_event().tid());
        pidtid_to_comm_map_[pidtid] =
            &(*commands_.find(event.comm_event().comm()));
        break;
      }
      case PERF_RECORD_LOST:
      case PERF_RECORD_THROTTLE:
      case PERF_RECORD_UNTHROTTLE:
      case PERF_RECORD_READ:
      case PERF_RECORD_AUX:
        VLOG(1) << "Parsed event type: " << event.header().type()
                << ". Doing nothing.";
        break;
      case PERF_RECORD_ITRACE_START:
      case PERF_RECORD_LOST_SAMPLES:
      case PERF_RECORD_SWITCH:
      case PERF_RECORD_SWITCH_CPU_WIDE:
      case PERF_RECORD_NAMESPACES:
        VLOG(1) << "Parsed event type: " << event.header().type()
                << ". Not yet supported.";
        break;
      default:
        LOG(ERROR) << "Unknown event type: " << event.header().type();
        return false;
    }
  }
  if (!FillInDsoBuildIds()) return false;

  // Print stats collected from parsing.
  // clang-format off
  VLOG(1) << "Parser processed: "
          << stats_.num_mmap_events << " MMAP/MMAP2 events, "
          << stats_.num_comm_events << " COMM events, "
          << stats_.num_fork_events << " FORK events, "
          << stats_.num_exit_events << " EXIT events, "
          << stats_.num_sample_events << " SAMPLE events, "
          << stats_.num_sample_events_mapped << " of these were mapped";
  // clang-format on

  float sample_mapping_percentage =
      static_cast<float>(stats_.num_sample_events_mapped) /
      stats_.num_sample_events * 100.;
  float threshold = options_.sample_mapping_percentage_threshold;
  if (sample_mapping_percentage < threshold) {
    LOG(ERROR) << "Mapped " << static_cast<int>(sample_mapping_percentage)
               << "% of samples, expected at least "
               << static_cast<int>(threshold) << "%";
    return false;
  }
  stats_.did_remap = options_.do_remap;
  return true;
}

namespace {

class FdCloser {
 public:
  explicit FdCloser(int fd) : fd_(fd) {}
  ~FdCloser() {
    if (fd_ != -1) close(fd_);
  }

 private:
  FdCloser() = delete;
  FdCloser(FdCloser&) = delete;

  int fd_;
};

// Merges two uint32_t into a uint64_t for hashing in an unordered_set because
// there is no default hash method for a pair.
uint64_t mergeTwoU32(uint32_t first, uint32_t second) {
  return (uint64_t)first << 32 | second;
}

// Splits a given uint64_t into two uint32_t. This reverts the above merge
// operation to retrieve the two uint32_t from an unordered_set.
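// For example, mergeTwoU32(0x12, 0x34) == 0x0000001200000034, and
// splitU64(0x0000001200000034) == {0x12, 0x34}.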
std::pair<uint32_t, uint32_t> splitU64(uint64_t value) {
  return std::make_pair(value >> 32,
                        std::numeric_limits<uint32_t>::max() & value);
}

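// Reads the ELF build ID of the file at |dso_path| into |buildid|. If |dso|
// carries device info (i.e. it came from an MMAP2 event), the file must also
// match that inode, so that an unrelated file at the same path is not
// mistaken for the mapped DSO. Returns true only if a build ID was read.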
bool ReadElfBuildIdIfSameInode(const string& dso_path, const DSOInfo& dso,
                               string* buildid) {
  int fd = open(dso_path.c_str(), O_RDONLY);
  FdCloser fd_closer(fd);
  if (fd == -1) {
    if (errno != ENOENT) LOG(ERROR) << "Failed to open ELF file: " << dso_path;
    return false;
  }

  struct stat s;
  CHECK_GE(fstat(fd, &s), 0);
  // Only reject based on inode if we actually have device info (from MMAP2).
  if (dso.maj != 0 && dso.min != 0 && !SameInode(dso, &s)) return false;

  return ReadElfBuildId(fd, buildid);
}

// Looks up build ID of a given DSO by reading directly from the file system.
// - Does not support reading build ID of the main kernel binary.
// - Reads build IDs of kernel modules and other DSOs using functions in dso.h.
string FindDsoBuildId(const DSOInfo& dso_info) {
  string buildid_bin;
  const string& dso_name = dso_info.name;
  if (IsKernelNonModuleName(dso_name)) return buildid_bin;  // still empty
  // Does this look like a kernel module?
  if (dso_name.size() >= 2 && dso_name[0] == '[' && dso_name.back() == ']') {
    // This may not be successful, but either way, just return. buildid_bin
    // will be empty if the module was not found.
    ReadModuleBuildId(dso_name.substr(1, dso_name.size() - 2), &buildid_bin);
    return buildid_bin;
  }
  // Try normal files, possibly inside containers.
  u32 last_pid = 0;
  std::vector<uint64_t> threads(dso_info.threads.begin(),
                                dso_info.threads.end());
  std::sort(threads.begin(), threads.end());
  for (auto pidtid_it : threads) {
    uint32_t pid, tid;
    std::tie(pid, tid) = splitU64(pidtid_it);
    std::stringstream dso_path_stream;
    dso_path_stream << "/proc/" << tid << "/root/" << dso_name;
    string dso_path = dso_path_stream.str();
    if (ReadElfBuildIdIfSameInode(dso_path, dso_info, &buildid_bin)) {
      return buildid_bin;
    }
    // Avoid re-trying the parent process if it's the same for multiple
    // threads. |threads| is sorted, so threads of the same process should be
    // adjacent.
    if (pid == last_pid || pid == tid) continue;
    last_pid = pid;
    // Try the parent process:
    std::stringstream parent_dso_path_stream;
    parent_dso_path_stream << "/proc/" << pid << "/root/" << dso_name;
    string parent_dso_path = parent_dso_path_stream.str();
    if (ReadElfBuildIdIfSameInode(parent_dso_path, dso_info, &buildid_bin)) {
      return buildid_bin;
    }
  }
  // Still don't have a buildid. Try our own filesystem:
  if (ReadElfBuildIdIfSameInode(dso_name, dso_info, &buildid_bin)) {
    return buildid_bin;
  }
  return buildid_bin;  // still empty.
}

}  // namespace

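// Fills in the build ID of each DSO in |name_to_dso_|, first from the build
// IDs already recorded in |reader_|, then, if |options_.read_missing_buildids|
// is set, by reading the filesystem for DSOs that were hit by samples. Any
// newly found build IDs are injected back into |reader_|.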
bool PerfParser::FillInDsoBuildIds() {
  std::map<string, string> filenames_to_build_ids;
  reader_->GetFilenamesToBuildIDs(&filenames_to_build_ids);

  std::map<string, string> new_buildids;

  for (std::pair<const string, DSOInfo>& kv : name_to_dso_) {
    DSOInfo& dso_info = kv.second;
    const auto it = filenames_to_build_ids.find(dso_info.name);
    if (it != filenames_to_build_ids.end()) {
      dso_info.build_id = it->second;
    }
    // If there is both an existing build ID and a new build ID returned by
    // FindDsoBuildId(), overwrite the existing build ID.
    if (options_.read_missing_buildids && dso_info.hit) {
      string buildid_bin = FindDsoBuildId(dso_info);
      if (!buildid_bin.empty()) {
        dso_info.build_id = RawDataToHexString(buildid_bin);
        new_buildids[dso_info.name] = dso_info.build_id;
      }
    }
  }

  if (new_buildids.empty()) return true;
  return reader_->InjectBuildIDs(new_buildids);
}

void PerfParser::UpdatePerfEventsFromParsedEvents() {
  // Reorder the events in |reader_| to match the order of |parsed_events_|.
  // The |event_ptr|'s in |parsed_events_| are pointers to existing events in
  // |reader_|.
  RepeatedPtrField<PerfEvent> new_events;
  new_events.Reserve(parsed_events_.size());
  for (ParsedEvent& parsed_event : parsed_events_) {
    PerfEvent* new_event = new_events.Add();
    new_event->Swap(parsed_event.event_ptr);
    parsed_event.event_ptr = new_event;
  }

  reader_->mutable_events()->Swap(&new_events);
}

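// Maps a single SAMPLE event: attaches the command name for its pid/tid,
// remaps the sample IP, and maps any callchain and branch stack entries.
// Returns false if the event lacks the required fields or if any address
// fails to map.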
bool PerfParser::MapSampleEvent(ParsedEvent* parsed_event) {
  bool mapping_failed = false;

  const PerfEvent& event = *parsed_event->event_ptr;
  if (!event.has_sample_event() ||
      !(event.sample_event().has_ip() && event.sample_event().has_pid() &&
        event.sample_event().has_tid())) {
    return false;
  }
  SampleEvent& sample_info = *parsed_event->event_ptr->mutable_sample_event();

  // Find the associated command.
  PidTid pidtid = std::make_pair(sample_info.pid(), sample_info.tid());
  const auto comm_iter = pidtid_to_comm_map_.find(pidtid);
  if (comm_iter != pidtid_to_comm_map_.end())
    parsed_event->set_command(comm_iter->second);

  const uint64_t unmapped_event_ip = sample_info.ip();
  uint64_t remapped_event_ip = 0;

  // Map the event IP itself.
  if (!MapIPAndPidAndGetNameAndOffset(sample_info.ip(), pidtid,
                                      &remapped_event_ip,
                                      &parsed_event->dso_and_offset)) {
    mapping_failed = true;
  } else {
    sample_info.set_ip(remapped_event_ip);
  }

  if (sample_info.callchain_size() &&
      !MapCallchain(sample_info.ip(), pidtid, unmapped_event_ip,
                    sample_info.mutable_callchain(), parsed_event)) {
    mapping_failed = true;
  }

  if (sample_info.branch_stack_size() &&
      !MapBranchStack(pidtid, sample_info.mutable_branch_stack(),
                      parsed_event)) {
    mapping_failed = true;
  }

  return !mapping_failed;
}

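// Maps each callchain entry of a sample, skipping context markers (values >=
// PERF_CONTEXT_MAX) and the sample address itself, which has already been
// mapped. Returns false if any remaining entry fails to map.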
bool PerfParser::MapCallchain(const uint64_t ip, const PidTid pidtid,
                              const uint64_t original_event_addr,
                              RepeatedField<uint64>* callchain,
                              ParsedEvent* parsed_event) {
  if (!callchain) {
    LOG(ERROR) << "NULL call stack data.";
    return false;
  }

  bool mapping_failed = false;

  // If the callchain is empty, there is no work to do.
  if (callchain->empty()) return true;

  // Keeps track of whether the current entry is kernel or user.
  parsed_event->callchain.resize(callchain->size());
  int num_entries_mapped = 0;
  for (int i = 0; i < callchain->size(); ++i) {
    uint64_t entry = callchain->Get(i);
    // When a callchain context entry is found, do not attempt to symbolize it.
    if (entry >= PERF_CONTEXT_MAX) {
      continue;
    }
    // The sample address has already been mapped so no need to map it.
    if (entry == original_event_addr) {
      callchain->Set(i, ip);
      continue;
    }
    uint64_t mapped_addr = 0;
    if (!MapIPAndPidAndGetNameAndOffset(
            entry, pidtid, &mapped_addr,
            &parsed_event->callchain[num_entries_mapped++])) {
      mapping_failed = true;
    } else {
      callchain->Set(i, mapped_addr);
    }
  }
  // Not all the entries were mapped. Trim |parsed_event->callchain| to
  // remove unused entries at the end.
  parsed_event->callchain.resize(num_entries_mapped);

  return !mapping_failed;
}

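// Maps the from/to addresses of each branch stack entry of a sample, after
// trimming trailing null entries. Returns false on a malformed branch stack
// or if any address fails to map.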
bool PerfParser::MapBranchStack(
    const PidTid pidtid, RepeatedPtrField<BranchStackEntry>* branch_stack,
    ParsedEvent* parsed_event) {
  if (!branch_stack) {
    LOG(ERROR) << "NULL branch stack data.";
    return false;
  }

  // First, trim the branch stack to remove trailing null entries.
  size_t trimmed_size = 0;
  for (const BranchStackEntry& entry : *branch_stack) {
    // Count the number of non-null entries before the first null entry.
    if (IsNullBranchStackEntry(entry)) break;
    ++trimmed_size;
  }

  // If a null entry was found, make sure all subsequent entries are null as
  // well.
  for (int i = trimmed_size; i < branch_stack->size(); ++i) {
    const BranchStackEntry& entry = branch_stack->Get(i);
    if (!IsNullBranchStackEntry(entry)) {
      LOG(ERROR) << "Non-null branch stack entry found after null entry: "
                 << reinterpret_cast<void*>(entry.from_ip()) << " -> "
                 << reinterpret_cast<void*>(entry.to_ip());
      return false;
    }
  }

  // Map branch stack addresses.
  parsed_event->branch_stack.resize(trimmed_size);
  for (unsigned int i = 0; i < trimmed_size; ++i) {
    BranchStackEntry* entry = branch_stack->Mutable(i);
    ParsedEvent::BranchEntry& parsed_entry = parsed_event->branch_stack[i];

    uint64_t from_mapped = 0;
    if (!MapIPAndPidAndGetNameAndOffset(entry->from_ip(), pidtid, &from_mapped,
                                        &parsed_entry.from)) {
      return false;
    }
    entry->set_from_ip(from_mapped);

    uint64_t to_mapped = 0;
    if (!MapIPAndPidAndGetNameAndOffset(entry->to_ip(), pidtid, &to_mapped,
                                        &parsed_entry.to)) {
      return false;
    }
    entry->set_to_ip(to_mapped);

    parsed_entry.predicted = !entry->mispredicted();
  }

  return true;
}

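// Translates |ip| within the address space of process |pidtid| into the
// parser's synthetic address space, storing the result in |*new_ip| (the
// original |ip| is kept when remapping is disabled) and recording the DSO and
// offset it falls in. Also marks the DSO as hit and credits the sample to its
// MMAP region. Returns false if no mapping covers |ip| or if the remapped
// address breaks page alignment.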
bool PerfParser::MapIPAndPidAndGetNameAndOffset(
    uint64_t ip, PidTid pidtid, uint64_t* new_ip,
    ParsedEvent::DSOAndOffset* dso_and_offset) {
  DCHECK(dso_and_offset);
  // Attempt to find the synthetic address of the IP sample in this order:
  // 1. Address space of the kernel.
  // 2. Address space of its own process.
  // 3. Address space of the parent process.

  uint64_t mapped_addr = 0;

  // Sometimes the first event we see for a process is a SAMPLE event, before
  // any event that would have created an address mapper for it (e.g. for
  // pid 0).
  AddressMapper* mapper = GetOrCreateProcessMapper(pidtid.first).first;
  AddressMapper::MappingList::const_iterator ip_iter;
  bool mapped =
      mapper->GetMappedAddressAndListIterator(ip, &mapped_addr, &ip_iter);
  if (mapped) {
    uint64_t id = UINT64_MAX;
    mapper->GetMappedIDAndOffset(ip, ip_iter, &id, &dso_and_offset->offset_);
    // Make sure the ID points to a valid event.
    CHECK_LE(id, parsed_events_.size());
    ParsedEvent& parsed_event = parsed_events_[id];
    const auto& event = parsed_event.event_ptr;
    DCHECK(event->has_mmap_event()) << "Expected MMAP or MMAP2 event";

    // Find the mmap DSO filename in the set of known DSO names.
    auto dso_iter = name_to_dso_.find(event->mmap_event().filename());
    CHECK(dso_iter != name_to_dso_.end());
    dso_and_offset->dso_info_ = &dso_iter->second;

    dso_iter->second.hit = true;
    dso_iter->second.threads.insert(mergeTwoU32(pidtid.first, pidtid.second));
    ++parsed_event.num_samples_in_mmap_region;

    if (options_.do_remap) {
      if (GetPageAlignedOffset(mapped_addr) != GetPageAlignedOffset(ip)) {
        LOG(ERROR) << "Remapped address " << std::hex << mapped_addr << " "
                   << "does not have the same page alignment offset as "
                   << "original address " << ip;
        return false;
      }
      *new_ip = mapped_addr;
    } else {
      *new_ip = ip;
    }
  }
  return mapped;
}

bool PerfParser::MapMmapEvent(PerfDataProto_MMapEvent* event, uint64_t id) {
  // We need to hide only the real kernel addresses. However, to make things
  // more secure, and make the mapping idempotent, we should remap all
  // addresses, both kernel and non-kernel.

  AddressMapper* mapper = GetOrCreateProcessMapper(event->pid()).first;

  uint64_t start = event->start();
  uint64_t len = event->len();
  uint64_t pgoff = event->pgoff();

  // |id| == 0 corresponds to the kernel mmap. We have several cases here:
  //
  // For ARM and x86, in sudo mode, pgoff == start, example:
  // start=0x80008200
  // pgoff=0x80008200
  // len  =0xfffffff7ff7dff
  //
  // For x86-64, in sudo mode, pgoff is between start and start + len. SAMPLE
  // events lie between pgoff and pgoff + length of the real kernel binary,
  // example:
  // start=0x3bc00000
  // pgoff=0xffffffffbcc00198
  // len  =0xffffffff843fffff
  // SAMPLE events will be found after pgoff. For kernels with ASLR, pgoff will
  // be something only visible to the root user, and will be randomized at
  // startup. With |remap| set to true, we should hide pgoff in this case. So
  // we normalize all SAMPLE events relative to pgoff.
  //
  // For non-sudo mode, the kernel will be mapped from 0 to the pointer limit,
  // example:
  // start=0x0
  // pgoff=0x0
  // len  =0xffffffff
  if (id == 0) {
    // If pgoff is between start and start + len, we normalize the event by
    // setting start to be pgoff, just as it is for ARM and x86. We also set
    // len to a much smaller number (closer to the real length of the kernel
    // binary), because SAMPLEs are actually only seen between |event->pgoff|
    // and |event->pgoff + kernel text size|.
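    // As an illustration, plugging in the x86-64 example above:
    // start=0x3bc00000, pgoff=0xffffffffbcc00198, len=0xffffffff843fffff
    // becomes start=0xffffffffbcc00198 and len=0x33ffe67 (about 52 MiB),
    // which is on the order of the kernel image size.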
    if (pgoff > start && pgoff < start + len) {
      len = len + start - pgoff;
      start = pgoff;
    }
    // For kernels with ASLR, pgoff is critical information that should not be
    // revealed when |remap| is true.
    pgoff = 0;
  }

  if (!mapper->MapWithID(start, len, id, pgoff, true)) {
    mapper->DumpToLog();
    return false;
  }

  if (options_.do_remap) {
    uint64_t mapped_addr;
    AddressMapper::MappingList::const_iterator start_iter;
    if (!mapper->GetMappedAddressAndListIterator(start, &mapped_addr,
                                                 &start_iter)) {
      LOG(ERROR) << "Failed to map starting address " << std::hex << start;
      return false;
    }
    if (GetPageAlignedOffset(mapped_addr) != GetPageAlignedOffset(start)) {
      LOG(ERROR) << "Remapped address " << std::hex << mapped_addr << " "
                 << "does not have the same page alignment offset as start "
                 << "address " << start;
      return false;
    }

    event->set_start(mapped_addr);
    event->set_len(len);
    event->set_pgoff(pgoff);
  }
  return true;
}

bool PerfParser::MapCommEvent(const PerfDataProto_CommEvent& event) {
  GetOrCreateProcessMapper(event.pid());
  return true;
}

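// Handles a FORK event: the child thread inherits the parent's command name,
// and a newly forked process gets an address mapper derived from its parent's.
// A fork that only creates a new thread within the same process needs no new
// mapper.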
bool PerfParser::MapForkEvent(const PerfDataProto_ForkEvent& event) {
  PidTid parent = std::make_pair(event.ppid(), event.ptid());
  PidTid child = std::make_pair(event.pid(), event.tid());
  if (parent != child) {
    auto parent_iter = pidtid_to_comm_map_.find(parent);
    if (parent_iter != pidtid_to_comm_map_.end())
      pidtid_to_comm_map_[child] = parent_iter->second;
  }

  const uint32_t pid = event.pid();

  // If the parent and child pids are the same, this is just a new thread
  // within the same process, so don't do anything.
  if (event.ppid() == pid) return true;

  if (!GetOrCreateProcessMapper(pid, event.ppid()).second) {
    DVLOG(1) << "Found an existing process mapper with pid: " << pid;
  }

  return true;
}

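// Returns the address mapper of process |pid|, creating one if none exists.
// A new mapper is copied from the parent process |ppid| if that mapper exists,
// otherwise from the kernel process mapper; failing both, a fresh mapper is
// created. The bool in the returned pair is true if a new mapper was created.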
std::pair<AddressMapper*, bool> PerfParser::GetOrCreateProcessMapper(
    uint32_t pid, uint32_t ppid) {
  const auto& search = process_mappers_.find(pid);
  if (search != process_mappers_.end()) {
    return std::make_pair(search->second.get(), false);
  }

  auto parent_mapper = process_mappers_.find(ppid);
  // Recent perf implementations (at least as recent as perf 4.4) add an
  // explicit FORK event from the swapper process to the init process. There
  // may be no explicit memory mappings created for the swapper process. In
  // such cases, we must use the mappings from the kernel process, which are
  // used by default for a new PID in the absence of an explicit FORK event.
  if (parent_mapper == process_mappers_.end()) {
    parent_mapper = process_mappers_.find(kKernelPid);
  }
  std::unique_ptr<AddressMapper> mapper;
  if (parent_mapper != process_mappers_.end()) {
    mapper.reset(new AddressMapper(*parent_mapper->second));
  } else {
    mapper.reset(new AddressMapper());
    mapper->set_page_alignment(kMmapPageAlignment);
  }

  const auto inserted =
      process_mappers_.insert(search, std::make_pair(pid, std::move(mapper)));
  return std::make_pair(inserted->second.get(), true);
}

}  // namespace quipper