/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed
 * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
 *   file together with modifications to the memory scanning parameters
 *   including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define REPORTS_NR		50	/* maximum number of reported leaks */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define MSECS_SCAN_YIELD	10	/* CPU yielding period */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
static unsigned long jiffies_min_age;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan;
/* mutex protecting the memory scanning */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[200];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_panic(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}
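
/*
 * Worked example of the color encoding above (illustrative only, not code
 * from this file): an object created with min_count == 1 whose count was
 * reset to 0 at the start of a scan has count < min_count, so color_white()
 * is true and it is a leak candidate. If one pointer to it is then found,
 * count becomes 1 >= min_count and color_gray() is true. An object marked
 * via kmemleak_ignore() gets min_count == -1 (black) and neither function
 * returns true.
 */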

/*
 * Objects are considered referenced if their color is gray and they have not
 * been deleted.
 */
static int referenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
}

/*
 * Printing of the (un)referenced objects information, either to the seq file
 * or to the kernel log. The print_referenced/print_unreferenced functions
 * must be called with the object->lock held.
 */
#define print_helper(seq, x...)	do {	\
	struct seq_file *s = (seq);	\
	if (s)				\
		seq_printf(s, x);	\
	else				\
		pr_info(x);		\
} while (0)

static void print_referenced(struct kmemleak_object *object)
{
	pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
		object->pointer, object->size);
}

static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
		     object->pointer, object->size);
	print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		     object->comm, object->pid, object->jiffies);
	print_helper(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		print_helper(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("kmemleak: Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
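
/*
 * Sketch of the reference counting pattern built on the two helpers above
 * (this is what find_and_get_object() below implements; shown here only
 * for illustration):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(under kmemleak_lock)
 *	if (object && !get_object(object))
 *		object = NULL;			(already being freed)
 *	rcu_read_unlock();
 *	... use the object ...
 *	put_object(object);			(may schedule the RCU freeing)
 */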

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK);
	if (!object) {
		kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object "
			       "structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command name is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel allocator does not yet have the pointer
	 * to the memory block, so it cannot free it. However, we still hold
	 * the kmemleak_lock here in case parts of the kernel start freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object "
			       "search tree (already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
		kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
			      ptr);
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_REPORTED)
		print_referenced(object);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("kmemleak: Adding scan area to unknown "
			      "object at 0x%08lx\n", ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK);
	if (!area) {
		kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("kmemleak: Scan area larger than object "
			      "0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("kmemleak: Not scanning unknown object at "
			      "0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_panic("kmemleak: Early log buffer exceeded\n");
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
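
/*
 * Illustrative call sequence only (the real call sites live in the kernel
 * allocators mentioned above, not in this file). A caller might pass
 * min_count == 1 so that a single surviving pointer keeps the block from
 * being reported, and report the block again on free:
 *
 *	kmemleak_alloc(ptr, size, 1, gfp);
 *	...
 *	kmemleak_free(ptr);
 */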

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
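
/*
 * Hypothetical usage example: if the only pointer to an allocated block is
 * stored somewhere kmemleak does not scan, the resulting false positive
 * can be silenced right after the allocation:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);
 */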

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
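
/*
 * Hypothetical usage example: a block known not to leak and holding only
 * character data (hence no pointers worth following) can be excluded from
 * both reporting and scanning:
 *
 *	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	kmemleak_ignore(buf);
 */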

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
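
/*
 * Hypothetical usage example (struct foo and obj are made up for the
 * illustration): if only the ptrs[] array inside a larger object may hold
 * references, scanning can be restricted to that range:
 *
 *	kmemleak_scan_area(obj, offsetof(struct foo, ptrs),
 *			   sizeof(obj->ptrs), GFP_KERNEL);
 */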

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
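
/*
 * Hypothetical usage example: a buffer handed to a device contains no
 * kernel pointers, so scanning it is pointless, although references to
 * the buffer itself are still searched for:
 *
 *	kmemleak_no_scan(dma_buf);
 */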

/*
 * Yield the CPU so that other tasks get a chance to run. The yielding is
 * rate-limited to avoid an excessive number of calls to the schedule()
 * function during memory scanning.
 */
static void scan_yield(void)
{
	might_sleep();

	if (time_is_before_eq_jiffies(next_scan_yield)) {
		schedule();
		next_scan_yield = jiffies + jiffies_scan_yield;
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (scan_should_stop())
			break;

		/*
		 * When scanning a memory block with a corresponding
		 * kmemleak_object, the CPU yielding is handled in the calling
		 * code since it holds the object->lock to avoid the block
		 * freeing.
		 */
		if (!scanned)
			scan_yield();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("kmemleak: object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL);
	scan_block(__bss_start, __bss_stop, NULL);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE, NULL);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		scan_yield();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Thread function performing the automatic memory scanning. Unreferenced
 * objects at the end of a memory scan are reported, but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("kmemleak: Automatic memory scanning thread started\n");

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		struct kmemleak_object *object;
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);

		kmemleak_scan();
		reported_leaks = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list) {
			unsigned long flags;

			if (reported_leaks >= REPORTS_NR)
				break;
			spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_REPORTED) &&
			    unreferenced_object(object)) {
				print_unreferenced(NULL, object);
				object->flags |= OBJECT_REPORTED;
				reported_leaks++;
			} else if ((object->flags & OBJECT_REPORTED) &&
				   referenced_object(object)) {
				print_referenced(object);
				object->flags &= ~OBJECT_REPORTED;
			}
			spin_unlock_irqrestore(&object->lock, flags);
		}
		rcu_read_unlock();

		mutex_unlock(&scan_mutex);
		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("kmemleak: Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("kmemleak: Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scan when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	if (!n) {
		kmemleak_scan();
		reported_leaks = 0;
	}
	if (reported_leaks >= REPORTS_NR)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
	if (reported_leaks >= REPORTS_NR)
		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();
out:
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if (!unreferenced_object(object))
		goto out;
	print_unreferenced(seq, object);
	reported_leaks++;
out:
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&kmemleak_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = mutex_lock_interruptible(&scan_mutex);
		if (ret < 0)
			goto kmemleak_unlock;
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
kmemleak_unlock:
	mutex_unlock(&kmemleak_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		seq_release(inode, file);
		mutex_unlock(&scan_mutex);
	}
	mutex_unlock(&kmemleak_mutex);

	return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off       - disable kmemleak (irreversible)
 *   stack=on  - enable the task stacks scanning
 *   stack=off - disable the task stacks scanning
 *   scan=on   - start the automatic memory scanning thread
 *   scan=off  - stop the automatic memory scanning thread
 *   scan=...  - set the automatic memory scanning period in seconds (0 to
 *               disable it)
 */
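
/*
 * For example, from user space (see also Documentation/kmemleak.txt):
 *
 *	echo scan=off > /sys/kernel/debug/kmemleak
 *	echo scan=600 > /sys/kernel/debug/kmemleak
 */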
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&kmemleak_mutex);
	stop_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	mutex_lock(&scan_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("kmemleak: Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
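
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables kmemleak before any allocation tracing starts; "kmemleak=on"
 * (the default) leaves it enabled.
 */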

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("kmemleak: Failed to create the debugfs kmemleak "
			   "file\n");
	mutex_lock(&kmemleak_mutex);
	start_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);