blob: 75541b4dcf0d212683816cab84a8469a00dfd45a [file] [log] [blame]
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -08001#!/usr/bin/env python
Sasha Goldshtein50459642016-02-10 08:35:20 -08002#
Sasha Goldshtein0e856f42016-03-21 07:26:52 -07003# memleak Trace and display outstanding allocations to detect
4# memory leaks in user-mode processes and the kernel.
Sasha Goldshtein50459642016-02-10 08:35:20 -08005#
Sasha Goldshtein29e37d92016-02-14 06:56:07 -08006# USAGE: memleak [-h] [-p PID] [-t] [-a] [-o OLDER] [-c COMMAND]
Rinat Ibragimov2c1799c2017-07-11 21:14:08 +03007# [--combined-only] [-s SAMPLE_RATE] [-T TOP] [-z MIN_SIZE]
8# [-Z MAX_SIZE] [-O OBJ]
Sasha Goldshtein0e856f42016-03-21 07:26:52 -07009# [interval] [count]
Sasha Goldshtein50459642016-02-10 08:35:20 -080010#
Sasha Goldshtein43fa0412016-02-10 22:17:26 -080011# Licensed under the Apache License, Version 2.0 (the "License")
Sasha Goldshtein50459642016-02-10 08:35:20 -080012# Copyright (C) 2016 Sasha Goldshtein.
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080013
Sasha Goldshtein49df9942017-02-08 23:22:06 -050014from bcc import BPF
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080015from time import sleep
Sasha Goldshteinc8148c82016-02-09 11:15:41 -080016from datetime import datetime
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080017import argparse
18import subprocess
Sasha Goldshteincfce3112016-02-07 11:09:36 -080019import os
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080020
class Allocation(object):
    """Running totals for outstanding allocations that share one call stack.

    Attributes:
        stack: list of symbolized frames for the allocation site.
        count: number of outstanding allocations from this stack.
        size:  total outstanding bytes from this stack.
    """

    def __init__(self, stack, size):
        # A new record always represents exactly one allocation.
        self.stack = stack
        self.count, self.size = 1, size

    def update(self, size):
        # Fold one more outstanding allocation into this stack's totals.
        self.count, self.size = self.count + 1, self.size + size
Sasha Goldshtein29228612016-02-07 12:20:19 -080030
def run_command_get_output(command):
    """Spawn *command* and return an iterator over its raw output lines.

    stderr is merged into stdout so callers see a single stream; iteration
    stops when a read returns the empty bytestring (EOF).
    """
    proc = subprocess.Popen(
        command.split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    return iter(proc.stdout.readline, b'')
Sasha Goldshtein29228612016-02-07 12:20:19 -080035
def run_command_get_pid(command):
    """Launch *command* asynchronously and return the child's PID.

    The child inherits stdout/stderr; the caller is expected to attach
    probes to the returned PID rather than wait for completion.
    """
    child = subprocess.Popen(command.split())
    return child.pid
Sasha Goldshtein751fce52016-02-08 02:57:02 -080039
# Help text rendered verbatim by argparse (RawDescriptionHelpFormatter):
# `examples` becomes the epilog, `description` the leading summary.
examples = """
EXAMPLES:

./memleak -p $(pidof allocs)
        Trace allocations and display a summary of "leaked" (outstanding)
        allocations every 5 seconds
./memleak -p $(pidof allocs) -t
        Trace allocations and display each individual allocator function call
./memleak -ap $(pidof allocs) 10
        Trace allocations and display allocated addresses, sizes, and stacks
        every 10 seconds for outstanding allocations
./memleak -c "./allocs"
        Run the specified command and trace its allocations
./memleak
        Trace allocations in kernel mode and display a summary of outstanding
        allocations every 5 seconds
./memleak -o 60000
        Trace allocations in kernel mode and display a summary of outstanding
        allocations that are at least one minute (60 seconds) old
./memleak -s 5
        Trace roughly every 5th allocation, to reduce overhead
"""

description = """
Trace outstanding memory allocations that weren't freed.
Supports both user-mode allocations made with libc functions and kernel-mode
allocations made with kmalloc/kmem_cache_alloc/get_free_pages and corresponding
memory release functions.
"""
69
# Command-line interface. The positional interval/count pair controls the
# reporting cadence; the options select the trace target (PID, spawned
# command, or the kernel when neither is given) and filtering/sampling.
parser = argparse.ArgumentParser(description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=examples)
# -1 is a sentinel meaning "no PID given" (see kernel_trace below).
parser.add_argument("-p", "--pid", type=int, default=-1,
        help="the PID to trace; if not specified, trace kernel allocs")
parser.add_argument("-t", "--trace", action="store_true",
        help="print trace messages for each alloc/free call")
parser.add_argument("interval", nargs="?", default=5, type=int,
        help="interval in seconds to print outstanding allocations")
# count defaults to None: report forever unless a limit is given.
parser.add_argument("count", nargs="?", type=int,
        help="number of times to print the report before exiting")
parser.add_argument("-a", "--show-allocs", default=False, action="store_true",
        help="show allocation addresses and sizes as well as call stacks")
parser.add_argument("-o", "--older", default=500, type=int,
        help="prune allocations younger than this age in milliseconds")
parser.add_argument("-c", "--command",
        help="execute and trace the specified command")
parser.add_argument("--combined-only", default=False, action="store_true",
        help="show combined allocation statistics only")
parser.add_argument("-s", "--sample-rate", default=1, type=int,
        help="sample every N-th allocation to decrease the overhead")
parser.add_argument("-T", "--top", type=int, default=10,
        help="display only this many top allocating stacks (by size)")
parser.add_argument("-z", "--min-size", type=int,
        help="capture only allocations larger than this size")
parser.add_argument("-Z", "--max-size", type=int,
        help="capture only allocations smaller than this size")
# -O selects the object (e.g. a non-libc allocator) whose symbols get uprobes.
parser.add_argument("-O", "--obj", type=str, default="c",
        help="attach to allocator functions in the specified object")

args = parser.parse_args()
101
# Resolve the effective configuration from the parsed arguments.
pid = args.pid
command = args.command
# Kernel tracing is the default when neither a PID nor a command was given.
kernel_trace = (pid == -1 and command is None)
trace_all = args.trace
interval = args.interval
# --older is milliseconds; comparisons are against nanosecond timestamps.
min_age_ns = 1e6 * args.older
sample_every_n = args.sample_rate
num_prints = args.count
top_stacks = args.top
min_size = args.min_size
max_size = args.max_size
obj = args.obj

# Reject a contradictory size window before attaching anything.
if min_size is not None and max_size is not None and min_size > max_size:
    print("min_size (-z) can't be greater than max_size (-Z)")
    exit(1)

# -c: launch the target process now and trace it by its PID.
if command is not None:
    print("Executing '%s' and tracing the resulting process." % command)
    pid = run_command_get_pid(command)
Sasha Goldshtein29228612016-02-07 12:20:19 -0800122
# BPF program (C) shared by user-mode and kernel tracing. The ALL-CAPS
# placeholders (SIZE_FILTER, SAMPLE_EVERY_N, SHOULD_PRINT, STACK_FLAGS)
# are substituted with concrete text below before compilation.
#
# Fix: the BPF_STACK_TRACE declaration was missing its terminating
# semicolon, unlike every other table declaration in this program; bcc's
# table macros do not supply one themselves.
bpf_source = """
#include <uapi/linux/ptrace.h>

struct alloc_info_t {
        u64 size;
        u64 timestamp_ns;
        int stack_id;
};

struct combined_alloc_info_t {
        u64 total_size;
        u64 number_of_allocs;
};

BPF_HASH(sizes, u64);
BPF_TABLE("hash", u64, struct alloc_info_t, allocs, 1000000);
BPF_HASH(memptrs, u64, u64);
BPF_STACK_TRACE(stack_traces, 10240);
BPF_TABLE("hash", u64, struct combined_alloc_info_t, combined_allocs, 10240);

static inline void update_statistics_add(u64 stack_id, u64 sz) {
        struct combined_alloc_info_t *existing_cinfo;
        struct combined_alloc_info_t cinfo = {0};

        existing_cinfo = combined_allocs.lookup(&stack_id);
        if (existing_cinfo != 0)
                cinfo = *existing_cinfo;

        cinfo.total_size += sz;
        cinfo.number_of_allocs += 1;

        combined_allocs.update(&stack_id, &cinfo);
}

static inline void update_statistics_del(u64 stack_id, u64 sz) {
        struct combined_alloc_info_t *existing_cinfo;
        struct combined_alloc_info_t cinfo = {0};

        existing_cinfo = combined_allocs.lookup(&stack_id);
        if (existing_cinfo != 0)
                cinfo = *existing_cinfo;

        if (sz >= cinfo.total_size)
                cinfo.total_size = 0;
        else
                cinfo.total_size -= sz;

        if (cinfo.number_of_allocs > 0)
                cinfo.number_of_allocs -= 1;

        combined_allocs.update(&stack_id, &cinfo);
}

static inline int gen_alloc_enter(struct pt_regs *ctx, size_t size) {
        SIZE_FILTER
        if (SAMPLE_EVERY_N > 1) {
                u64 ts = bpf_ktime_get_ns();
                if (ts % SAMPLE_EVERY_N != 0)
                        return 0;
        }

        u64 pid = bpf_get_current_pid_tgid();
        u64 size64 = size;
        sizes.update(&pid, &size64);

        if (SHOULD_PRINT)
                bpf_trace_printk("alloc entered, size = %u\\n", size);
        return 0;
}

static inline int gen_alloc_exit2(struct pt_regs *ctx, u64 address) {
        u64 pid = bpf_get_current_pid_tgid();
        u64* size64 = sizes.lookup(&pid);
        struct alloc_info_t info = {0};

        if (size64 == 0)
                return 0; // missed alloc entry

        info.size = *size64;
        sizes.delete(&pid);

        info.timestamp_ns = bpf_ktime_get_ns();
        info.stack_id = stack_traces.get_stackid(ctx, STACK_FLAGS);
        allocs.update(&address, &info);
        update_statistics_add(info.stack_id, info.size);

        if (SHOULD_PRINT) {
                bpf_trace_printk("alloc exited, size = %lu, result = %lx\\n",
                                 info.size, address);
        }
        return 0;
}

static inline int gen_alloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit2(ctx, PT_REGS_RC(ctx));
}

static inline int gen_free_enter(struct pt_regs *ctx, void *address) {
        u64 addr = (u64)address;
        struct alloc_info_t *info = allocs.lookup(&addr);
        if (info == 0)
                return 0;

        allocs.delete(&addr);
        update_statistics_del(info->stack_id, info->size);

        if (SHOULD_PRINT) {
                bpf_trace_printk("free entered, address = %lx, size = %lu\\n",
                                 address, info->size);
        }
        return 0;
}

int malloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int malloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int free_enter(struct pt_regs *ctx, void *address) {
        return gen_free_enter(ctx, address);
}

int calloc_enter(struct pt_regs *ctx, size_t nmemb, size_t size) {
        return gen_alloc_enter(ctx, nmemb * size);
}

int calloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int realloc_enter(struct pt_regs *ctx, void *ptr, size_t size) {
        gen_free_enter(ctx, ptr);
        return gen_alloc_enter(ctx, size);
}

int realloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int posix_memalign_enter(struct pt_regs *ctx, void **memptr, size_t alignment,
                         size_t size) {
        u64 memptr64 = (u64)(size_t)memptr;
        u64 pid = bpf_get_current_pid_tgid();

        memptrs.update(&pid, &memptr64);
        return gen_alloc_enter(ctx, size);
}

int posix_memalign_exit(struct pt_regs *ctx) {
        u64 pid = bpf_get_current_pid_tgid();
        u64 *memptr64 = memptrs.lookup(&pid);
        void *addr;

        if (memptr64 == 0)
                return 0;

        memptrs.delete(&pid);

        if (bpf_probe_read(&addr, sizeof(void*), (void*)(size_t)*memptr64))
                return 0;

        u64 addr64 = (u64)(size_t)addr;
        return gen_alloc_exit2(ctx, addr64);
}

int aligned_alloc_enter(struct pt_regs *ctx, size_t alignment, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int aligned_alloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int valloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int valloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int memalign_enter(struct pt_regs *ctx, size_t alignment, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int memalign_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int pvalloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int pvalloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}
"""
Rinat Ibragimov2c1799c2017-07-11 21:14:08 +0300323
# Kernel-mode additions: tracepoint handlers for the slab allocators
# (kmalloc/kmem_cache) and the page allocator. Appended to bpf_source only
# when kernel_trace is set; the handlers reuse the gen_* helpers above.
# Page events identify allocations by PFN, which both page-allocator
# interfaces expose, so alloc/free pairs match without address conversion.
bpf_source_kernel = """

TRACEPOINT_PROBE(kmem, kmalloc) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmalloc_node) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kfree) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_alloc) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_alloc_node) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_free) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}

TRACEPOINT_PROBE(kmem, mm_page_alloc) {
        gen_alloc_enter((struct pt_regs *)args, PAGE_SIZE << args->order);
        return gen_alloc_exit2((struct pt_regs *)args, args->pfn);
}

TRACEPOINT_PROBE(kmem, mm_page_free) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->pfn);
}
"""
363
# Assemble the final program text and compile it.
if kernel_trace:
    bpf_source += bpf_source_kernel

# Substitute the ALL-CAPS placeholders embedded in the C source.
bpf_source = bpf_source.replace("SHOULD_PRINT", "1" if trace_all else "0")
bpf_source = bpf_source.replace("SAMPLE_EVERY_N", str(sample_every_n))

# SIZE_FILTER becomes an early-return guard (or nothing) in gen_alloc_enter.
size_filter = ""
if min_size is not None and max_size is not None:
    size_filter = "if (size < %d || size > %d) return 0;" % \
                  (min_size, max_size)
elif min_size is not None:
    size_filter = "if (size < %d) return 0;" % min_size
elif max_size is not None:
    size_filter = "if (size > %d) return 0;" % max_size
bpf_source = bpf_source.replace("SIZE_FILTER", size_filter)

# Collect user-space stacks only when tracing a process, not the kernel.
stack_flags = "BPF_F_REUSE_STACKID"
if not kernel_trace:
    stack_flags += "|BPF_F_USER_STACK"
bpf_source = bpf_source.replace("STACK_FLAGS", stack_flags)

# Compile and load the BPF program.
bpf = BPF(text=bpf_source)
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -0800386
if not kernel_trace:
    print("Attaching to pid %d, Ctrl+C to quit." % pid)

    def attach_probes(sym, fn_prefix=None, can_fail=False):
        # Attach the <fn_prefix>_enter uprobe and <fn_prefix>_exit
        # uretprobe to allocator symbol `sym` in object `obj` for `pid`.
        # can_fail tolerates symbols absent from older libc versions.
        if fn_prefix is None:
            fn_prefix = sym

        try:
            bpf.attach_uprobe(name=obj, sym=sym,
                              fn_name=fn_prefix + "_enter",
                              pid=pid)
            bpf.attach_uretprobe(name=obj, sym=sym,
                                 fn_name=fn_prefix + "_exit",
                                 pid=pid)
        except Exception:
            if can_fail:
                return
            else:
                raise

    attach_probes("malloc")
    attach_probes("calloc")
    attach_probes("realloc")
    attach_probes("posix_memalign")
    attach_probes("valloc")
    attach_probes("memalign")
    attach_probes("pvalloc")
    attach_probes("aligned_alloc", can_fail=True)  # added in C11
    # free gets only an entry probe: the freed address is its argument,
    # and there is no useful return value to inspect.
    bpf.attach_uprobe(name=obj, sym="free", fn_name="free_enter",
                      pid=pid)

else:
    print("Attaching to kernel allocators, Ctrl+C to quit.")

    # No probe attaching here. Allocations are counted by attaching to
    # tracepoints.
    #
    # Memory allocations in the Linux kernel are not limited to malloc/free
    # equivalents. It's also common to allocate a memory page or multiple
    # pages. The page allocator has two interfaces, one working with page
    # frame numbers (PFN), the other working with page addresses. It's
    # possible to allocate pages with one kind of function and free them
    # with another. Kernel code can easily convert PFNs to addresses and
    # back, but it's hard to do the same in an eBPF kprobe without fragile
    # hacks.
    #
    # Fortunately, Linux exposes tracepoints for memory allocations, which
    # can be instrumented by eBPF programs. The tracepoint for page
    # allocations gives access to PFNs for both allocator interfaces. So
    # there is no need to guess which allocation corresponds to which free.
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -0800437
def print_outstanding():
    """Print the top outstanding-allocation stacks, largest total last.

    Groups live entries of the BPF ``allocs`` table by stack id, skipping
    allocations younger than ``min_age_ns`` (likely not leaks yet) and
    entries whose stack collection failed.
    """
    print("[%s] Top %d stacks with outstanding allocations:" %
          (datetime.now().strftime("%H:%M:%S"), top_stacks))
    grouped = {}
    allocs_table = bpf["allocs"]
    traces = bpf["stack_traces"]
    for address, info in sorted(allocs_table.items(), key=lambda kv: kv[1].size):
        # Too recent to be considered leaked.
        if BPF.monotonic_time() - min_age_ns < info.timestamp_ns:
            continue
        # Negative id means the stack could not be captured.
        if info.stack_id < 0:
            continue
        existing = grouped.get(info.stack_id)
        if existing is not None:
            existing.update(info.size)
        else:
            frames = [bpf.sym(frame, pid, show_module=True, show_offset=True)
                      for frame in traces.walk(info.stack_id)]
            grouped[info.stack_id] = Allocation(frames, info.size)
        if args.show_allocs:
            print("\taddr = %x size = %s" %
                  (address.value, info.size))
    # Show only the heaviest stacks, in ascending order of total size.
    for alloc in sorted(grouped.values(), key=lambda a: a.size)[-top_stacks:]:
        print("\t%d bytes in %d allocations from stack\n\t\t%s" %
              (alloc.size, alloc.count, "\n\t\t".join(alloc.stack)))
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -0800467
def print_outstanding_combined():
    """Print per-stack combined totals from the BPF ``combined_allocs`` table.

    Takes the ``top_stacks`` largest stacks by total outstanding size and
    prints them smallest-first (the list is built descending, then reversed).
    A stack evicted from the stack-trace table is reported as lost.
    """
    traces = bpf["stack_traces"]
    heaviest_first = sorted(bpf["combined_allocs"].items(),
                            key=lambda kv: -kv[1].total_size)
    entries = []
    for stack_id, info in heaviest_first[:top_stacks]:
        try:
            frames = [bpf.sym(frame, pid, show_module=True, show_offset=True)
                      for frame in traces.walk(stack_id.value)]
            trace = "\n\t\t".join(frames)
        except KeyError:
            # The stack trace was overwritten/evicted before we read it.
            trace = "stack information lost"
        entries.append("\t%d bytes in %d allocations from stack\n\t\t%s" %
                       (info.total_size, info.number_of_allocs, trace))

    print("[%s] Top %d stacks with outstanding allocations:" %
          (datetime.now().strftime("%H:%M:%S"), top_stacks))

    print('\n'.join(reversed(entries)))
498
# Main reporting loop: with -t, stream each raw trace record as it arrives;
# otherwise sleep `interval` seconds between summary reports, stopping after
# `count` reports when a count was given, or on Ctrl+C.
count_so_far = 0
while True:
    if trace_all:
        print(bpf.trace_fields())
    else:
        try:
            sleep(interval)
        except KeyboardInterrupt:
            exit()
        if args.combined_only:
            print_outstanding_combined()
        else:
            print_outstanding()
        count_so_far += 1
        if num_prints is not None and count_so_far >= num_prints:
            exit()
514 exit()