/* Key garbage collector
 *
 * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"

/*
 * Delay between key revocation/expiry in seconds
 */
unsigned key_gc_delay = 5 * 60;
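/*
 * Note: this five minute default is normally tunable at runtime; in mainline
 * kernels key_gc_delay is exposed as /proc/sys/kernel/keys/gc_delay by
 * security/keys/sysctl.c (assuming CONFIG_SYSCTL is enabled).
 */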

/*
 * Reaper for unused keys.
 */
static void key_garbage_collector(struct work_struct *work);
DECLARE_WORK(key_gc_work, key_garbage_collector);

/*
 * Reaper for links from keyrings to dead keys.
 */
static void key_gc_timer_func(unsigned long);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);

static time_t key_gc_next_run = LONG_MAX;
static struct key_type *key_gc_dead_keytype;

static unsigned long key_gc_flags;
#define KEY_GC_KEY_EXPIRED	0	/* A key expired and needs unlinking */
#define KEY_GC_REAP_KEYTYPE	1	/* A keytype is being unregistered */
#define KEY_GC_REAPING_KEYTYPE	2	/* Cleared when keytype reaped */


/*
 * Any key whose type gets unregistered will be re-typed to this if it can't be
 * immediately unlinked.
 */
struct key_type key_type_dead = {
	.name = "dead",
};
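/*
 * key_type_dead deliberately supplies no instantiate, describe, read or
 * destroy operations, so once destroy_dead_key below has run the original
 * type's destructor, any later destruction of the key is a no-op apart from
 * freeing the struct key itself in key_gc_unused_keys().
 */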

/*
 * Schedule a garbage collection run.
 * - time precision isn't particularly important
 */
void key_schedule_gc(time_t gc_at)
{
	unsigned long expires;
	time_t now = current_kernel_time().tv_sec;

	kenter("%ld", gc_at - now);

	if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
		kdebug("IMMEDIATE");
		schedule_work(&key_gc_work);
	} else if (gc_at < key_gc_next_run) {
		kdebug("DEFERRED");
		key_gc_next_run = gc_at;
		expires = jiffies + (gc_at - now) * HZ;
		mod_timer(&key_gc_timer, expires);
	}
}
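/*
 * Illustrative sketch only (paraphrasing key_set_timeout() in key.c, which
 * may differ in detail): a caller that has just given a key an expiry time
 * would typically ask for a run at expiry + gc_delay:
 *
 *	key->expiry = current_kernel_time().tv_sec + timeout;
 *	key_schedule_gc(key->expiry + key_gc_delay);
 *
 * Passing a time at or before "now" degenerates into an immediate
 * schedule_work() on key_gc_work, as does an in-progress keytype reap.
 */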

/*
 * Schedule a dead links collection run.
 */
void key_schedule_gc_links(void)
{
	set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
	schedule_work(&key_gc_work);
}
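/*
 * Unlike key_schedule_gc(), this does not touch the timer: it simply forces
 * a link-reaping pass on the next collector cycle.  Besides the timer
 * callback below, likely callers are code elsewhere in security/keys that
 * has just invalidated or unlinked a key and wants dangling keyring links
 * cleaned up promptly (a statement of intent, not a list of callers).
 */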

/*
 * Some key's cleanup time was met after it expired, so we need to get the
 * reaper to go through a cycle finding expired keys.
 */
static void key_gc_timer_func(unsigned long data)
{
	kenter("");
	key_gc_next_run = LONG_MAX;
	key_schedule_gc_links();
}

/*
 * wait_on_bit() sleep function for uninterruptible waiting
 */
static int key_gc_wait_bit(void *flags)
{
	schedule();
	return 0;
}
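/*
 * This follows the older wait_on_bit() calling convention in which the
 * caller supplies an "action" function that performs the actual sleep;
 * returning 0 means "keep waiting, no signal seen", which together with
 * TASK_UNINTERRUPTIBLE gives an uninterruptible wait.  The matching wake-up
 * is the clear_bit()/wake_up_bit() pair in key_garbage_collector() below.
 */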

/*
 * Reap keys of dead type.
 *
 * We use three flags to make sure we see three complete cycles of the garbage
 * collector: the first to mark keys of that type as being dead, the second to
 * collect dead links and the third to clean up the dead keys.  We have to be
 * careful as there may already be a cycle in progress.
 *
 * The caller must be holding key_types_sem.
 */
void key_gc_keytype(struct key_type *ktype)
{
	kenter("%s", ktype->name);

	key_gc_dead_keytype = ktype;
	set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
	smp_mb();
	set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);

	kdebug("schedule");
	schedule_work(&key_gc_work);

	kdebug("sleep");
	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
		    TASK_UNINTERRUPTIBLE);

	key_gc_dead_keytype = NULL;
	kleave("");
}
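/*
 * Rough sketch of the expected caller (paraphrasing unregister_key_type() in
 * key.c; details may differ): the type is unhooked from the type list and
 * then reaped with key_types_sem still held:
 *
 *	down_write(&key_types_sem);
 *	list_del_init(&ktype->link);
 *	downgrade_write(&key_types_sem);
 *	key_gc_keytype(ktype);
 *	up_read(&key_types_sem);
 *
 * Keeping the semaphore read-locked across the reap blocks re-registration
 * of a key type until all keys of the old type have been dealt with.
 */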

/*
 * Garbage collect pointers from a keyring.
 *
 * Not called with any locks held.  The keyring's key struct will not be
 * deallocated under us as only our caller may deallocate it.
 */
static void key_gc_keyring(struct key *keyring, time_t limit)
{
	struct keyring_list *klist;
	int loop;

	kenter("%x", key_serial(keyring));

	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED)))
		goto dont_gc;

	/* scan the keyring looking for dead keys */
	rcu_read_lock();
	klist = rcu_dereference(keyring->payload.subscriptions);
	if (!klist)
		goto unlock_dont_gc;

	loop = klist->nkeys;
	smp_rmb();
	for (loop--; loop >= 0; loop--) {
		struct key *key = rcu_dereference(klist->keys[loop]);
		if (key_is_dead(key, limit))
			goto do_gc;
	}

unlock_dont_gc:
	rcu_read_unlock();
dont_gc:
	kleave(" [no gc]");
	return;

do_gc:
	rcu_read_unlock();

	keyring_gc(keyring, limit);
	kleave(" [gc]");
}
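/*
 * key_is_dead() comes from internal.h; roughly, a key counts as dead here if
 * it carries KEY_FLAG_DEAD or KEY_FLAG_INVALIDATED, or if its expiry time
 * has passed the limit computed by the collector (a sketch - see the header
 * for the authoritative test).  The actual unlinking is deferred to
 * keyring_gc() in keyring.c, which rewrites the keyring payload under the
 * keyring's own semaphore.
 */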

/*
 * Garbage collect a list of unreferenced, detached keys
 */
static noinline void key_gc_unused_keys(struct list_head *keys)
{
	while (!list_empty(keys)) {
		struct key *key =
			list_entry(keys->next, struct key, graveyard_link);
		list_del(&key->graveyard_link);

		kdebug("- %u", key->serial);
		key_check(key);

		security_key_free(key);

		/* deal with the user's key tracking and quota */
		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
			spin_lock(&key->user->lock);
			key->user->qnkeys--;
			key->user->qnbytes -= key->quotalen;
			spin_unlock(&key->user->lock);
		}

		atomic_dec(&key->user->nkeys);
		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
			atomic_dec(&key->user->nikeys);

		key_user_put(key->user);

		/* now throw away the key memory */
		if (key->type->destroy)
			key->type->destroy(key);

		kfree(key->description);

#ifdef KEY_DEBUGGING
		key->magic = KEY_DEBUG_MAGIC_X;
#endif
		kmem_cache_free(key_jar, key);
	}
}
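/*
 * Keys reach the list handled above via the graveyard in
 * key_garbage_collector() below: once a key's usage count is seen to be
 * zero it is removed from the serial tree, parked on the graveyard list,
 * and only destroyed here after synchronize_rcu() has guaranteed that no
 * RCU reader can still be looking at it.
 */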

/*
 * Garbage collector for unused keys.
 *
 * This is done in process context so that we don't have to disable interrupts
 * all over the place.  key_put() schedules this rather than trying to do the
 * cleanup itself, which means key_put() doesn't have to sleep.
 */
static void key_garbage_collector(struct work_struct *work)
{
	static LIST_HEAD(graveyard);
	static u8 gc_state;		/* Internal persistent state */
#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */

	struct rb_node *cursor;
	struct key *key;
	time_t new_timer, limit;

	kenter("[%lx,%x]", key_gc_flags, gc_state);

	limit = current_kernel_time().tv_sec;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;
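	/* Worked example: with the default key_gc_delay of 300 seconds and a
	 * current time of, say, 1000000, limit is 999700, so only keys whose
	 * expiry time is at least key_gc_delay in the past are treated as
	 * dead during this pass.
	 */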

	/* Work out what we're going to be doing in this pass */
	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
	gc_state <<= 1;
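	/* Only the dead-keytype stage bits survive from the previous pass,
	 * and shifting left by one advances them: KEY_GC_REAPING_DEAD_1
	 * (0x10) becomes KEY_GC_REAPING_DEAD_2 (0x20) and that in turn
	 * becomes KEY_GC_REAPING_DEAD_3 (0x40), producing the three-cycle
	 * sequence described above key_gc_keytype().
	 */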
	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;

	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_DEAD_1;
	kdebug("new pass %x", gc_state);

	new_timer = LONG_MAX;

	/* As only this function is permitted to remove things from the key
	 * serial tree, if cursor is non-NULL then it will always point to a
	 * valid node in the tree - even if lock got dropped.
	 */
	spin_lock(&key_serial_lock);
	cursor = rb_first(&key_serial_tree);

continue_scanning:
	while (cursor) {
		key = rb_entry(cursor, struct key, serial_node);
		cursor = rb_next(cursor);

		if (atomic_read(&key->usage) == 0)
			goto found_unreferenced_key;

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
			if (key->type == key_gc_dead_keytype) {
				gc_state |= KEY_GC_FOUND_DEAD_KEY;
				set_bit(KEY_FLAG_DEAD, &key->flags);
				key->perm = 0;
				goto skip_dead_key;
			}
		}

		if (gc_state & KEY_GC_SET_TIMER) {
			if (key->expiry > limit && key->expiry < new_timer) {
				kdebug("will expire %x in %ld",
				       key_serial(key), key->expiry - limit);
				new_timer = key->expiry;
			}
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
			if (key->type == key_gc_dead_keytype)
				gc_state |= KEY_GC_FOUND_DEAD_KEY;

		if ((gc_state & KEY_GC_REAPING_LINKS) ||
		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
			if (key->type == &key_type_keyring)
				goto found_keyring;
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
			if (key->type == key_gc_dead_keytype)
				goto destroy_dead_key;

	skip_dead_key:
		if (spin_is_contended(&key_serial_lock) || need_resched())
			goto contended;
	}

contended:
	spin_unlock(&key_serial_lock);

maybe_resched:
	if (cursor) {
		cond_resched();
		spin_lock(&key_serial_lock);
		goto continue_scanning;
	}

	/* We've completed the pass.  Set the timer if we need to and queue a
	 * new cycle if necessary.  We keep executing cycles until we find one
	 * where we didn't reap any keys.
	 */
	kdebug("pass complete");

	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
	    !list_empty(&graveyard)) {
		/* Make sure that all pending keyring payload destructions are
		 * fulfilled and that people aren't now looking at dead or
		 * dying keys that they don't have a reference upon or a link
		 * to.
		 */
		kdebug("gc sync");
		synchronize_rcu();
	}

	if (!list_empty(&graveyard)) {
		kdebug("gc keys");
		key_gc_unused_keys(&graveyard);
	}

	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
				 KEY_GC_REAPING_DEAD_2))) {
		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
			/* No remaining dead keys: short circuit the remaining
			 * keytype reap cycles.
			 */
			kdebug("dead short");
			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
			gc_state |= KEY_GC_REAPING_DEAD_3;
		} else {
			gc_state |= KEY_GC_REAP_AGAIN;
		}
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
		kdebug("dead wake");
		smp_mb();
		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
	}

	if (gc_state & KEY_GC_REAP_AGAIN)
		schedule_work(&key_gc_work);
	kleave(" [end %x]", gc_state);
	return;

	/* We found an unreferenced key - once we've removed it from the tree,
	 * we can safely drop the lock.
	 */
found_unreferenced_key:
	kdebug("unrefd key %d", key->serial);
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	list_add_tail(&key->graveyard_link, &graveyard);
	gc_state |= KEY_GC_REAP_AGAIN;
	goto maybe_resched;

	/* We found a keyring and we need to check the payload for links to
	 * dead or expired keys.  We don't flag another reap immediately as we
	 * have to wait for the old payload to be destroyed by RCU before we
	 * can reap the keys to which it refers.
	 */
found_keyring:
	spin_unlock(&key_serial_lock);
	kdebug("scan keyring %d", key->serial);
	key_gc_keyring(key, limit);
	goto maybe_resched;

	/* We found a dead key that is still referenced.  Reset its type and
	 * destroy its payload with its semaphore held.
	 */
destroy_dead_key:
	spin_unlock(&key_serial_lock);
	kdebug("destroy key %d", key->serial);
	down_write(&key->sem);
	key->type = &key_type_dead;
	if (key_gc_dead_keytype->destroy)
		key_gc_dead_keytype->destroy(key);
	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
	up_write(&key->sem);
	goto maybe_resched;
}
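/*
 * Rough end-to-end sketch of how the pieces above fit together (callers
 * outside this file are paraphrased, not quoted):
 *
 *	key_put(key);				// usage count drops to zero
 *	  schedule_work(&key_gc_work);		// per the comment above
 *	key_garbage_collector()
 *	  found_unreferenced_key:		// rb_erase() + graveyard
 *	  synchronize_rcu();			// let RCU readers drain
 *	  key_gc_unused_keys(&graveyard);	// quota, ->destroy(), kfree
 *
 * Expired keys and dying key types travel the same work item but go through
 * the keyring link reap (key_gc_keyring()/keyring_gc()) first, so that no
 * keyring still refers to a key by the time it is finally freed.
 */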