/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flags for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() on failure (never NULL).
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
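
/*
 * Example (illustrative sketch only, not called from this file): a caller
 * pairs fscrypt_get_ctx() with fscrypt_release_ctx().  The "inode" here is
 * assumed to belong to an fscrypt-aware filesystem with its key loaded:
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... use ctx for a read or write request ...
 *	fscrypt_release_ctx(ctx);
 */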

/**
 * fscrypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void fscrypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

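/*
 * do_page_crypto() - en/decrypt one page with AES-XTS.
 *
 * The XTS tweak is the 64-bit page index copied into the low bytes of a
 * 16-byte buffer, with the remainder zeroed, so each page position in a
 * file gets a distinct tweak.  The skcipher request is driven to
 * completion before returning: if the crypto API queues it (-EINPROGRESS
 * or -EBUSY), we sleep until fscrypt_complete() signals the result.
 */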
static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fscrypt_complete, &ecr);

	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags: The gfp flags for memory allocation
 *
 * Allocates a ciphertext bounce page and encrypts plaintext_page into it
 * using an encryption context obtained via fscrypt_get_ctx().
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR() on failure (never NULL).
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page,
					gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
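
/*
 * Illustrative write-path usage (a sketch; the surrounding writepage
 * logic and names other than the fscrypt_* calls are assumptions):
 *
 *	struct page *cipher_page;
 *
 *	cipher_page = fscrypt_encrypt_page(inode, page, GFP_NOFS);
 *	if (IS_ERR(cipher_page))
 *		return PTR_ERR(cipher_page);
 *	... submit cipher_page for I/O in place of the plaintext page,
 *	    then, once the write has completed ...
 *	fscrypt_restore_control_page(cipher_page);
 */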

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
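
/*
 * Example with hypothetical numbers: to zero four blocks of an encrypted
 * file starting at logical block 8, backed by physical block 2048:
 *
 *	err = fscrypt_zeroout_range(inode, 8, 2048, 4);
 *
 * Each block is produced by encrypting ZERO_PAGE(0) with that block's
 * lblk-derived tweak and written out synchronously, so the on-disk range
 * decrypts to zeroes.
 */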

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so that the filesystem's
	 * ->lookup() can do this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
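
/*
 * Sketch of the expected wiring (an assumption; details vary per
 * filesystem): during ->lookup() in an encrypted directory, install these
 * dentry operations so the name is revalidated as keys come and go:
 *
 *	if (dir->i_sb->s_cop->is_encrypted(dir))
 *		d_set_d_op(dentry, &fscrypt_d_ops);
 */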

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
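
/*
 * Illustrative read-path usage (a sketch; the endio function and stashing
 * the ctx in bi_private are assumptions about how a caller arranges it):
 *
 *	static void my_read_endio(struct bio *bio)
 *	{
 *		struct fscrypt_ctx *ctx = bio->bi_private;
 *
 *		fscrypt_decrypt_bio_pages(ctx, bio);
 *	}
 *
 * completion_pages() then decrypts each segment, marks it up-to-date or
 * in error, unlocks it, and releases the ctx and the bio.
 */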

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped; a page with a mapping is the original. */
	if ((*page)->mapping)
		return;

	/* The unmapped page is the bounce page; its private data is the ctx. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");