/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees it as well.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * error pointer otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

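/*
 * Illustrative usage sketch (not part of this file): a caller pairs
 * fscrypt_get_ctx() with fscrypt_release_ctx(). Everything inside this
 * #if 0 block is a hypothetical example, not an API defined here.
 */
#if 0
static int example_with_ctx(struct inode *inode)
{
	struct fscrypt_ctx *ctx;

	/* Requires inode->i_crypt_info to be set up, else -ENOKEY. */
	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* ... use ctx for a read or write crypto operation ... */

	fscrypt_release_ctx(ctx);
	return 0;
}
#endif
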
/**
 * page_crypt_complete() - completion callback for page crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_%scrypt() returned %d\n",
			__func__, (rw == FS_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}

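/*
 * Worked example (informative only): with a 64-bit pgoff_t and page
 * index 5, the 16-byte XTS tweak built above is the native-endian bytes
 * of the index followed by zero padding; on a little-endian machine:
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * Each page is thus encrypted independently with its page index as the
 * tweak, so identical plaintext pages at different file offsets produce
 * different ciphertext.
 */
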
static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * inode's encryption context.
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error pointer.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page,
					gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);

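/*
 * Illustrative sketch (hypothetical, not part of this file): how a
 * filesystem's writeback path might use fscrypt_encrypt_page(). The
 * my_fs_* helpers are made up; ext4 and f2fs do the equivalent in
 * their write paths.
 */
#if 0
static int my_fs_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct page *cipher_page;

	cipher_page = fscrypt_encrypt_page(inode, page, GFP_NOFS);
	if (IS_ERR(cipher_page))
		return PTR_ERR(cipher_page);

	/* Submit the bounce page, not the pagecache page, to the bio. */
	my_fs_submit_page_write(cipher_page);

	/* On write completion, release the bounce page and the ctx. */
	fscrypt_restore_control_page(cipher_page);
	return 0;
}
#endif
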
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts the page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

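/*
 * Illustrative sketch (hypothetical): decrypting a single page after a
 * synchronous read; my_fs_read_page_sync is a made-up helper. Most
 * callers instead batch this via fscrypt_decrypt_bio_pages() below.
 */
#if 0
static int my_fs_read_and_decrypt(struct page *page)
{
	int err = my_fs_read_page_sync(page);

	if (err)
		return err;
	lock_page(page);
	err = fscrypt_decrypt_page(page);	/* in-place */
	unlock_page(page);
	return err;
}
#endif
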
int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

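/*
 * Illustrative call (hypothetical block numbers): zeroing four encrypted
 * blocks, logical blocks 100..103 backed by physical blocks 200..203.
 * Each block is written as the encryption of a zero page, so a later
 * decrypting read returns zeroes.
 */
#if 0
	err = fscrypt_zeroout_range(inode, 100 /* lblk */,
				    200 /* pblk */, 4 /* len */);
#endif
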
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

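/*
 * Illustrative sketch (hypothetical): a filesystem's ->lookup() would
 * attach fscrypt_d_ops to dentries in an encrypted directory so the
 * checks above run on each dcache hit. my_fs_lookup is made up; ext4
 * and f2fs do the equivalent in their lookup paths.
 */
#if 0
static struct dentry *my_fs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	if (dir->i_sb->s_cop->is_encrypted(dir))
		d_set_d_op(dentry, &fscrypt_d_ops);
	/* ... normal lookup continues ... */
	return NULL;
}
#endif
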
/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

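/*
 * Illustrative sketch (hypothetical): hooking decryption into a read
 * bio's completion. The ctx would have been allocated with
 * fscrypt_get_ctx() when the bio was built; the bi_private usage is an
 * assumption about the caller's own bookkeeping.
 */
#if 0
static void my_fs_read_end_io(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;

	if (bio->bi_error) {
		/* ... error handling; pages stay !Uptodate ... */
		fscrypt_release_ctx(ctx);
		bio_put(bio);
		return;
	}
	/* Defer decryption to the fscrypt read workqueue. */
	fscrypt_decrypt_bio_pages(ctx, bio);
}
#endif
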
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped, so a mapped page needs no pullback. */
	if ((*page)->mapping)
		return;

	/* The page is an unmapped bounce page; look up its context. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

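/*
 * Illustrative sketch (hypothetical): a write-completion path swapping
 * each bounce page back for its original pagecache ("control") page and
 * releasing it, in the style of f2fs. my_fs_write_end_io is made up.
 */
#if 0
static void my_fs_write_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		/* Replace the bounce page with the original, freeing the ctx. */
		fscrypt_pullback_bio_page(&page, true);
		end_page_writeback(page);
	}
	bio_put(bio);
}
#endif
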
static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

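/*
 * Illustrative sketch (hypothetical): a filesystem typically calls
 * fscrypt_initialize() lazily, the first time it sets up a file's
 * encryption key, so the pools above are only allocated when encryption
 * is actually in use. my_fs_setup_crypt_info is made up.
 */
#if 0
static int my_fs_setup_crypt_info(struct inode *inode)
{
	int res = fscrypt_initialize();

	if (res)
		return res;
	/* ... derive the per-file key and set inode->i_crypt_info ... */
	return 0;
}
#endif
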
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");