/* Key garbage collector
 *
 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"

/*
 * Delay, in seconds, between a key being revoked or expiring and the garbage
 * collector reaping links to it
 */
unsigned key_gc_delay = 5 * 60;

/*
 * Reaper for unused keys.
 */
static void key_gc_unused_keys(struct work_struct *work);
DECLARE_WORK(key_gc_unused_work, key_gc_unused_keys);

/*
 * Reaper for links from keyrings to dead keys.
 */
static void key_gc_timer_func(unsigned long);
static void key_gc_dead_links(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
static DECLARE_WORK(key_gc_work, key_gc_dead_links);
static key_serial_t key_gc_cursor; /* the last key the gc considered */
static bool key_gc_again;	/* T if another scan of the keyrings is needed */
static unsigned long key_gc_executing;	/* bit 0 set whilst a scan is in progress */
static time_t key_gc_next_run = LONG_MAX;
static time_t key_gc_new_timer;

/*
 * Schedule a garbage collection run.
 * - time precision isn't particularly important
 */
void key_schedule_gc(time_t gc_at)
{
	unsigned long expires;
	time_t now = current_kernel_time().tv_sec;

	kenter("%ld", gc_at - now);

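	/* if the collection is already due, run it now; otherwise arm the
	 * timer, but only to bring the next run forward, never to push it
	 * back */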
	if (gc_at <= now) {
		schedule_work(&key_gc_work);
	} else if (gc_at < key_gc_next_run) {
		expires = jiffies + (gc_at - now) * HZ;
		mod_timer(&key_gc_timer, expires);
	}
}

/*
 * The garbage collection timer has expired: kick off a collection run.
 */
static void key_gc_timer_func(unsigned long data)
{
	kenter("");
	key_gc_next_run = LONG_MAX;
	schedule_work(&key_gc_work);
}

/*
 * Garbage collect pointers from a keyring.
 *
 * Return true if we altered the keyring.
 */
static bool key_gc_keyring(struct key *keyring, time_t limit)
	__releases(key_serial_lock)
{
	struct keyring_list *klist;
	struct key *key;
	int loop;

	kenter("%x", key_serial(keyring));

	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto dont_gc;

	/* scan the keyring looking for dead keys */
	rcu_read_lock();
	klist = rcu_dereference(keyring->payload.subscriptions);
	if (!klist)
		goto unlock_dont_gc;

	for (loop = klist->nkeys - 1; loop >= 0; loop--) {
		key = klist->keys[loop];
		if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
		    (key->expiry > 0 && key->expiry <= limit))
			goto do_gc;
	}

unlock_dont_gc:
	rcu_read_unlock();
dont_gc:
	kleave(" = false");
	return false;

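	/* record where to resume the scan, pin the keyring and drop the
	 * serial lock so that keyring_gc() can modify the keyring (which may
	 * involve sleeping) */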
do_gc:
	rcu_read_unlock();
	key_gc_cursor = keyring->serial;
	key_get(keyring);
	spin_unlock(&key_serial_lock);
	keyring_gc(keyring, limit);
	key_put(keyring);
	kleave(" = true");
	return true;
}

/*
 * Garbage collector for links to dead keys.
 *
 * This involves scanning the keyrings for dead, expired and revoked keys that
 * have overstayed their welcome.
 */
static void key_gc_dead_links(struct work_struct *work)
{
	struct rb_node *rb;
	key_serial_t cursor;
	struct key *key, *xkey;
	time_t new_timer = LONG_MAX, limit, now;

	now = current_kernel_time().tv_sec;
	kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);

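	/* only one collection pass may run at a time; if one is already in
	 * progress, defer this one by a second */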
	if (test_and_set_bit(0, &key_gc_executing)) {
		key_schedule_gc(current_kernel_time().tv_sec + 1);
		kleave(" [busy; deferring]");
		return;
	}

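	/* calculate the cut-off time: links to keys that expired before this
	 * point have overstayed their welcome and will be reaped */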
	limit = now;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	spin_lock(&key_serial_lock);

	if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
		spin_unlock(&key_serial_lock);
		clear_bit(0, &key_gc_executing);
		return;
	}

	cursor = key_gc_cursor;
	if (cursor < 0)
		cursor = 0;
	if (cursor > 0)
		new_timer = key_gc_new_timer;
	else
		key_gc_again = false;

	/* find the first key with a serial number above the cursor; if the
	 * cursor key itself is still in the tree, resume at the key after it */
	key = NULL;
	rb = key_serial_tree.rb_node;
	while (rb) {
		xkey = rb_entry(rb, struct key, serial_node);
		if (cursor < xkey->serial) {
			key = xkey;
			rb = rb->rb_left;
		} else if (cursor > xkey->serial) {
			rb = rb->rb_right;
		} else {
			rb = rb_next(rb);
			if (!rb)
				goto reached_the_end;
			key = rb_entry(rb, struct key, serial_node);
			break;
		}
	}

	if (!key)
		goto reached_the_end;

	/* trawl through the keys looking for keyrings */
	for (;;) {
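		/* note the earliest forthcoming expiry; the next collection
		 * run will be scheduled relative to it */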
		if (key->expiry > limit && key->expiry < new_timer) {
			kdebug("will expire %x in %ld",
			       key_serial(key), key->expiry - limit);
			new_timer = key->expiry;
		}

		if (key->type == &key_type_keyring &&
		    key_gc_keyring(key, limit))
			/* the gc had to release our lock so that the keyring
			 * could be modified, so we have to get it again */
			goto gc_released_our_lock;

		rb = rb_next(&key->serial_node);
		if (!rb)
			goto reached_the_end;
		key = rb_entry(rb, struct key, serial_node);
	}

gc_released_our_lock:
	kdebug("gc_released_our_lock");
	key_gc_new_timer = new_timer;
	key_gc_again = true;
	clear_bit(0, &key_gc_executing);
	schedule_work(&key_gc_work);
	kleave(" [continue]");
	return;

	/* when we reach the end of the run, we set the timer for the next one */
reached_the_end:
	kdebug("reached_the_end");
	spin_unlock(&key_serial_lock);
	key_gc_new_timer = new_timer;
	key_gc_cursor = 0;
	clear_bit(0, &key_gc_executing);

	if (key_gc_again) {
		/* there may have been a key that expired whilst we were
		 * scanning, so if we discarded any links we should do another
		 * scan */
		new_timer = now + 1;
		key_schedule_gc(new_timer);
	} else if (new_timer < LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}
	kleave(" [end]");
}

/*
 * Garbage collector for unused keys.
 *
 * This is done in process context so that we don't have to disable interrupts
 * all over the place.  key_put() schedules this rather than trying to do the
 * cleanup itself, which means key_put() doesn't have to sleep.
 */
static void key_gc_unused_keys(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

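	/* each pass takes the serial lock, pulls at most one unreferenced key
	 * out of the tree and then destroys it with the lock dropped before
	 * starting again */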
go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

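	/* poison the debugging magic number so that any further use of the
	 * freed key can be caught when key debugging is enabled */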
#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;
}