blob: a9030f7f36a01db4f017f3430060e24df40b1c29 [file] [log] [blame]
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -08001#!/usr/bin/env python
Sasha Goldshtein50459642016-02-10 08:35:20 -08002#
Sasha Goldshtein0e856f42016-03-21 07:26:52 -07003# memleak Trace and display outstanding allocations to detect
4# memory leaks in user-mode processes and the kernel.
Sasha Goldshtein50459642016-02-10 08:35:20 -08005#
Sasha Goldshtein29e37d92016-02-14 06:56:07 -08006# USAGE: memleak [-h] [-p PID] [-t] [-a] [-o OLDER] [-c COMMAND]
Rinat Ibragimov2c1799c2017-07-11 21:14:08 +03007# [--combined-only] [-s SAMPLE_RATE] [-T TOP] [-z MIN_SIZE]
8# [-Z MAX_SIZE] [-O OBJ]
Sasha Goldshtein0e856f42016-03-21 07:26:52 -07009# [interval] [count]
Sasha Goldshtein50459642016-02-10 08:35:20 -080010#
Sasha Goldshtein43fa0412016-02-10 22:17:26 -080011# Licensed under the Apache License, Version 2.0 (the "License")
Sasha Goldshtein50459642016-02-10 08:35:20 -080012# Copyright (C) 2016 Sasha Goldshtein.
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080013
Sasha Goldshtein49df9942017-02-08 23:22:06 -050014from bcc import BPF
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080015from time import sleep
Sasha Goldshteinc8148c82016-02-09 11:15:41 -080016from datetime import datetime
Yonghong Songeb6ddc02017-10-26 22:33:24 -070017import resource
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080018import argparse
19import subprocess
Sasha Goldshteincfce3112016-02-07 11:09:36 -080020import os
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -080021
class Allocation(object):
        """Aggregate of outstanding allocations that share one call stack.

        Holds the resolved stack frames plus a running count and total byte
        size; one instance exists per unique stack id in the report.
        """

        def __init__(self, stack, size):
                """Begin tracking a stack with its first outstanding allocation."""
                self.stack = stack
                self.size = size
                self.count = 1

        def update(self, size):
                """Fold one more outstanding allocation into this aggregate."""
                self.size += size
                self.count += 1
Sasha Goldshtein29228612016-02-07 12:20:19 -080031
def run_command_get_output(command):
        """Run *command* and return an iterator over its output lines.

        stderr is folded into stdout; lines are yielded as bytes until EOF.
        The command string is split on whitespace (no shell quoting).
        """
        child = subprocess.Popen(command.split(),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        return iter(child.stdout.readline, b'')
Sasha Goldshtein29228612016-02-07 12:20:19 -080036
def run_command_get_pid(command):
        """Launch *command* in the background and return the child's PID.

        The child inherits our stdout/stderr; the caller is responsible for
        any reaping. The command string is split on whitespace.
        """
        child = subprocess.Popen(command.split())
        return child.pid
Sasha Goldshtein751fce52016-02-08 02:57:02 -080040
# --help epilog: worked examples of common invocations (shown verbatim).
examples = """
EXAMPLES:

./memleak -p $(pidof allocs)
        Trace allocations and display a summary of "leaked" (outstanding)
        allocations every 5 seconds
./memleak -p $(pidof allocs) -t
        Trace allocations and display each individual allocator function call
./memleak -ap $(pidof allocs) 10
        Trace allocations and display allocated addresses, sizes, and stacks
        every 10 seconds for outstanding allocations
./memleak -c "./allocs"
        Run the specified command and trace its allocations
./memleak
        Trace allocations in kernel mode and display a summary of outstanding
        allocations every 5 seconds
./memleak -o 60000
        Trace allocations in kernel mode and display a summary of outstanding
        allocations that are at least one minute (60 seconds) old
./memleak -s 5
        Trace roughly every 5th allocation, to reduce overhead
"""

description = """
Trace outstanding memory allocations that weren't freed.
Supports both user-mode allocations made with libc functions and kernel-mode
allocations made with kmalloc/kmem_cache_alloc/get_free_pages and corresponding
memory release functions.
"""

# RawDescriptionHelpFormatter keeps the examples/description formatting intact.
parser = argparse.ArgumentParser(description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=examples)
parser.add_argument("-p", "--pid", type=int, default=-1,
        help="the PID to trace; if not specified, trace kernel allocs")
parser.add_argument("-t", "--trace", action="store_true",
        help="print trace messages for each alloc/free call")
parser.add_argument("interval", nargs="?", default=5, type=int,
        help="interval in seconds to print outstanding allocations")
parser.add_argument("count", nargs="?", type=int,
        help="number of times to print the report before exiting")
parser.add_argument("-a", "--show-allocs", default=False, action="store_true",
        help="show allocation addresses and sizes as well as call stacks")
parser.add_argument("-o", "--older", default=500, type=int,
        help="prune allocations younger than this age in milliseconds")
parser.add_argument("-c", "--command",
        help="execute and trace the specified command")
parser.add_argument("--combined-only", default=False, action="store_true",
        help="show combined allocation statistics only")
parser.add_argument("-s", "--sample-rate", default=1, type=int,
        help="sample every N-th allocation to decrease the overhead")
parser.add_argument("-T", "--top", type=int, default=10,
        help="display only this many top allocating stacks (by size)")
parser.add_argument("-z", "--min-size", type=int,
        help="capture only allocations larger than this size")
parser.add_argument("-Z", "--max-size", type=int,
        help="capture only allocations smaller than this size")
parser.add_argument("-O", "--obj", type=str, default="c",
        help="attach to allocator functions in the specified object")

args = parser.parse_args()

pid = args.pid
command = args.command
# Kernel tracing is the default when neither a PID nor a command was given.
kernel_trace = (pid == -1 and command is None)
trace_all = args.trace
interval = args.interval
# --older is given in milliseconds; the BPF side stamps records in ns.
min_age_ns = 1e6 * args.older
sample_every_n = args.sample_rate
num_prints = args.count
top_stacks = args.top
min_size = args.min_size
max_size = args.max_size
obj = args.obj

# Reject a contradictory size window before doing any work.
if min_size is not None and max_size is not None and min_size > max_size:
        print("min_size (-z) can't be greater than max_size (-Z)")
        exit(1)

# -c: spawn the target ourselves and trace the resulting process.
if command is not None:
        print("Executing '%s' and tracing the resulting process." % command)
        pid = run_command_get_pid(command)
Sasha Goldshtein29228612016-02-07 12:20:19 -0800123
# BPF program (C) shared by user- and kernel-mode tracing. It records each
# allocation's size and stack in the `allocs` map keyed by returned address,
# removes entries on free, and keeps per-stack running totals in
# `combined_allocs`. The tokens SHOULD_PRINT, SAMPLE_EVERY_N, SIZE_FILTER,
# STACK_FLAGS (and PAGE_SIZE for the kernel part) are textually substituted
# by the Python code below before compilation.
bpf_source = """
#include <uapi/linux/ptrace.h>

struct alloc_info_t {
        u64 size;
        u64 timestamp_ns;
        int stack_id;
};

struct combined_alloc_info_t {
        u64 total_size;
        u64 number_of_allocs;
};

BPF_HASH(sizes, u64);
BPF_TABLE("hash", u64, struct alloc_info_t, allocs, 1000000);
BPF_HASH(memptrs, u64, u64);
BPF_STACK_TRACE(stack_traces, 10240)
BPF_TABLE("hash", u64, struct combined_alloc_info_t, combined_allocs, 10240);

static inline void update_statistics_add(u64 stack_id, u64 sz) {
        struct combined_alloc_info_t *existing_cinfo;
        struct combined_alloc_info_t cinfo = {0};

        existing_cinfo = combined_allocs.lookup(&stack_id);
        if (existing_cinfo != 0)
                cinfo = *existing_cinfo;

        cinfo.total_size += sz;
        cinfo.number_of_allocs += 1;

        combined_allocs.update(&stack_id, &cinfo);
}

static inline void update_statistics_del(u64 stack_id, u64 sz) {
        struct combined_alloc_info_t *existing_cinfo;
        struct combined_alloc_info_t cinfo = {0};

        existing_cinfo = combined_allocs.lookup(&stack_id);
        if (existing_cinfo != 0)
                cinfo = *existing_cinfo;

        if (sz >= cinfo.total_size)
                cinfo.total_size = 0;
        else
                cinfo.total_size -= sz;

        if (cinfo.number_of_allocs > 0)
                cinfo.number_of_allocs -= 1;

        combined_allocs.update(&stack_id, &cinfo);
}

static inline int gen_alloc_enter(struct pt_regs *ctx, size_t size) {
        SIZE_FILTER
        if (SAMPLE_EVERY_N > 1) {
                u64 ts = bpf_ktime_get_ns();
                if (ts % SAMPLE_EVERY_N != 0)
                        return 0;
        }

        u64 pid = bpf_get_current_pid_tgid();
        u64 size64 = size;
        sizes.update(&pid, &size64);

        if (SHOULD_PRINT)
                bpf_trace_printk("alloc entered, size = %u\\n", size);
        return 0;
}

static inline int gen_alloc_exit2(struct pt_regs *ctx, u64 address) {
        u64 pid = bpf_get_current_pid_tgid();
        u64* size64 = sizes.lookup(&pid);
        struct alloc_info_t info = {0};

        if (size64 == 0)
                return 0; // missed alloc entry

        info.size = *size64;
        sizes.delete(&pid);

        info.timestamp_ns = bpf_ktime_get_ns();
        info.stack_id = stack_traces.get_stackid(ctx, STACK_FLAGS);
        allocs.update(&address, &info);
        update_statistics_add(info.stack_id, info.size);

        if (SHOULD_PRINT) {
                bpf_trace_printk("alloc exited, size = %lu, result = %lx\\n",
                                 info.size, address);
        }
        return 0;
}

static inline int gen_alloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit2(ctx, PT_REGS_RC(ctx));
}

static inline int gen_free_enter(struct pt_regs *ctx, void *address) {
        u64 addr = (u64)address;
        struct alloc_info_t *info = allocs.lookup(&addr);
        if (info == 0)
                return 0;

        allocs.delete(&addr);
        update_statistics_del(info->stack_id, info->size);

        if (SHOULD_PRINT) {
                bpf_trace_printk("free entered, address = %lx, size = %lu\\n",
                                 address, info->size);
        }
        return 0;
}

int malloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int malloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int free_enter(struct pt_regs *ctx, void *address) {
        return gen_free_enter(ctx, address);
}

int calloc_enter(struct pt_regs *ctx, size_t nmemb, size_t size) {
        return gen_alloc_enter(ctx, nmemb * size);
}

int calloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int realloc_enter(struct pt_regs *ctx, void *ptr, size_t size) {
        gen_free_enter(ctx, ptr);
        return gen_alloc_enter(ctx, size);
}

int realloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int posix_memalign_enter(struct pt_regs *ctx, void **memptr, size_t alignment,
                         size_t size) {
        u64 memptr64 = (u64)(size_t)memptr;
        u64 pid = bpf_get_current_pid_tgid();

        memptrs.update(&pid, &memptr64);
        return gen_alloc_enter(ctx, size);
}

int posix_memalign_exit(struct pt_regs *ctx) {
        u64 pid = bpf_get_current_pid_tgid();
        u64 *memptr64 = memptrs.lookup(&pid);
        void *addr;

        if (memptr64 == 0)
                return 0;

        memptrs.delete(&pid);

        if (bpf_probe_read(&addr, sizeof(void*), (void*)(size_t)*memptr64) != 0)
                return 0;

        u64 addr64 = (u64)(size_t)addr;
        return gen_alloc_exit2(ctx, addr64);
}

int aligned_alloc_enter(struct pt_regs *ctx, size_t alignment, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int aligned_alloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int valloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int valloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int memalign_enter(struct pt_regs *ctx, size_t alignment, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int memalign_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int pvalloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int pvalloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}
"""
Rinat Ibragimov2c1799c2017-07-11 21:14:08 +0300324
# Kernel-mode addendum: instead of uprobes, kernel allocations are observed
# through kmem tracepoints, which expose both allocator interfaces (addresses
# and page frame numbers) so each alloc can be matched to its free.
bpf_source_kernel = """

TRACEPOINT_PROBE(kmem, kmalloc) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmalloc_node) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kfree) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_alloc) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_alloc_node) {
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_free) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}

TRACEPOINT_PROBE(kmem, mm_page_alloc) {
        gen_alloc_enter((struct pt_regs *)args, PAGE_SIZE << args->order);
        return gen_alloc_exit2((struct pt_regs *)args, args->pfn);
}

TRACEPOINT_PROBE(kmem, mm_page_free) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->pfn);
}
"""
364
# The kernel tracepoint handlers are only compiled in when tracing the kernel.
if kernel_trace:
        bpf_source += bpf_source_kernel

# Substitute the compile-time placeholders in the C source.
bpf_source = bpf_source.replace("SHOULD_PRINT", "1" if trace_all else "0")
bpf_source = bpf_source.replace("SAMPLE_EVERY_N", str(sample_every_n))
bpf_source = bpf_source.replace("PAGE_SIZE", str(resource.getpagesize()))

# Build the optional size filter injected at the top of gen_alloc_enter;
# empty string means no filtering.
size_filter = ""
if min_size is not None and max_size is not None:
        size_filter = "if (size < %d || size > %d) return 0;" % \
                        (min_size, max_size)
elif min_size is not None:
        size_filter = "if (size < %d) return 0;" % min_size
elif max_size is not None:
        size_filter = "if (size > %d) return 0;" % max_size
bpf_source = bpf_source.replace("SIZE_FILTER", size_filter)

# User-space stack capture only makes sense when tracing a process.
stack_flags = "BPF_F_REUSE_STACKID"
if not kernel_trace:
        stack_flags += "|BPF_F_USER_STACK"
bpf_source = bpf_source.replace("STACK_FLAGS", stack_flags)

# Compile and load the BPF program.
bpf_program = BPF(text=bpf_source)

if not kernel_trace:
        print("Attaching to pid %d, Ctrl+C to quit." % pid)

        # Attach the <fn_prefix>_enter/<fn_prefix>_exit pair as a
        # uprobe/uretprobe on allocator symbol `sym` in object `obj`.
        # can_fail=True tolerates symbols absent from older libc versions.
        def attach_probes(sym, fn_prefix=None, can_fail=False):
                if fn_prefix is None:
                        fn_prefix = sym

                try:
                        bpf_program.attach_uprobe(name=obj, sym=sym,
                                                  fn_name=fn_prefix+"_enter",
                                                  pid=pid)
                        bpf_program.attach_uretprobe(name=obj, sym=sym,
                                                     fn_name=fn_prefix+"_exit",
                                                     pid=pid)
                except Exception:
                        if can_fail:
                                return
                        else:
                                raise

        attach_probes("malloc")
        attach_probes("calloc")
        attach_probes("realloc")
        attach_probes("posix_memalign")
        attach_probes("valloc")
        attach_probes("memalign")
        attach_probes("pvalloc")
        attach_probes("aligned_alloc", can_fail=True)  # added in C11
        # free() needs no return probe — only the entry address matters.
        bpf_program.attach_uprobe(name=obj, sym="free", fn_name="free_enter",
                                  pid=pid)

else:
        print("Attaching to kernel allocators, Ctrl+C to quit.")

        # No probe attaching here. Allocations are counted by attaching to
        # tracepoints.
        #
        # Memory allocations in Linux kernel are not limited to malloc/free
        # equivalents. It's also common to allocate a memory page or multiple
        # pages. Page allocator have two interfaces, one working with page frame
        # numbers (PFN), while other working with page addresses. It's possible
        # to allocate pages with one kind of functions, and free them with
        # another. Code in kernel can easy convert PFNs to addresses and back,
        # but it's hard to do the same in eBPF kprobe without fragile hacks.
        #
        # Fortunately, Linux exposes tracepoints for memory allocations, which
        # can be instrumented by eBPF programs. Tracepoint for page allocations
        # gives access to PFNs for both allocator interfaces. So there is no
        # need to guess which allocation corresponds to which free.
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -0800438
def print_outstanding():
        """Report the top outstanding allocations, grouped by call stack.

        Walks the BPF `allocs` map, skips entries younger than the pruning
        age (min_age_ns) or with an invalid stack id, aggregates the rest
        per stack id, and prints the `top_stacks` largest groups by total
        size (ascending). With -a each surviving address/size is also shown.
        """
        print("[%s] Top %d stacks with outstanding allocations:" %
              (datetime.now().strftime("%H:%M:%S"), top_stacks))
        allocs = bpf_program["allocs"]
        stack_traces = bpf_program["stack_traces"]
        grouped = {}
        for address, info in sorted(allocs.items(), key=lambda kv: kv[1].size):
                # Too young to be considered leaked yet.
                if BPF.monotonic_time() - min_age_ns < info.timestamp_ns:
                        continue
                # Negative ids mean the stack could not be captured.
                if info.stack_id < 0:
                        continue
                entry = grouped.get(info.stack_id)
                if entry is not None:
                        entry.update(info.size)
                else:
                        # First sighting of this stack: resolve its frames.
                        frames = [bpf_program.sym(addr, pid,
                                                  show_module=True,
                                                  show_offset=True)
                                  for addr in stack_traces.walk(info.stack_id)]
                        grouped[info.stack_id] = Allocation(frames, info.size)
                if args.show_allocs:
                        print("\taddr = %x size = %s" %
                              (address.value, info.size))
        for alloc in sorted(grouped.values(),
                            key=lambda a: a.size)[-top_stacks:]:
                print("\t%d bytes in %d allocations from stack\n\t\t%s" %
                      (alloc.size, alloc.count, "\n\t\t".join(alloc.stack)))
Sasha Goldshtein4f1ea672016-02-07 01:57:42 -0800468
def print_outstanding_combined():
        """Report per-stack combined totals from the BPF-side statistics.

        Reads the `combined_allocs` map (maintained entirely in BPF by
        update_statistics_add/del), takes the `top_stacks` entries with the
        largest total size, and prints them smallest-first. Stacks whose
        trace has been evicted are reported as lost.
        """
        stack_traces = bpf_program["stack_traces"]
        by_size_desc = sorted(bpf_program["combined_allocs"].items(),
                              key=lambda kv: -kv[1].total_size)
        entries = []
        for stack_id, info in by_size_desc[:top_stacks]:
                try:
                        frames = [bpf_program.sym(addr, pid,
                                                  show_module=True,
                                                  show_offset=True)
                                  for addr in stack_traces.walk(stack_id.value)]
                        trace = "\n\t\t".join(frames)
                except KeyError:
                        # The stack trace was dropped from the stack table.
                        trace = "stack information lost"

                entries.append(
                        "\t%d bytes in %d allocations from stack\n\t\t%s" %
                        (info.total_size, info.number_of_allocs, trace))

        print("[%s] Top %d stacks with outstanding allocations:" %
              (datetime.now().strftime("%H:%M:%S"), top_stacks))

        print('\n'.join(reversed(entries)))
499
# Main loop: either stream raw trace output (-t) or print a periodic report
# every `interval` seconds, stopping after `count` reports if one was given.
count_so_far = 0
while True:
        if trace_all:
                # -t: relay each alloc/free bpf_trace_printk line as it arrives.
                print(bpf_program.trace_fields())
        else:
                try:
                        sleep(interval)
                except KeyboardInterrupt:
                        exit()
                if args.combined_only:
                        print_outstanding_combined()
                else:
                        print_outstanding()
                count_so_far += 1
                if num_prints is not None and count_so_far >= num_prints:
                        exit()
515 exit()