/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

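/*
 * Note: both knobs above are read-only at runtime (mode 0444), so they can
 * only be set at load time. A hypothetical invocation, assuming this code is
 * built as a module named fscrypto (the module name depends on the build
 * setup):
 *
 *	modprobe fscrypto num_prealloc_crypto_pages=64 \
 *		num_prealloc_crypto_ctxs=256
 */
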
static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

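/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * filesystem that needs a context outside the helpers below would pair
 * fscrypt_get_ctx() with fscrypt_release_ctx():
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... use ctx, e.g. stash it in a bio for the read path ...
 *	fscrypt_release_ctx(ctx);
 */
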
/**
 * page_crypt_complete() - completion callback for page crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

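/*
 * do_page_crypto() handles both directions. The 16-byte XTS tweak is derived
 * from the logical block number: the low eight bytes hold the little-endian
 * lblk_num and the rest are zero padding, so every block of a file gets a
 * distinct tweak without any per-block metadata.
 */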
static int do_page_crypto(const struct inode *inode,
			fscrypt_direction_t rw, u64 lblk_num,
			struct page *src_page, struct page *dest_page,
			unsigned int len, unsigned int offs,
			gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
	} xts_tweak;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
	xts_tweak.index = cpu_to_le64(lblk_num);
	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_INPLACE_ENCRYPTION flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_INPLACE_ENCRYPTION) {
		/* with inplace-encryption we just encrypt the page */
		err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
					page, ciphertext_page,
					len, offs, gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				page, ciphertext_page,
				len, offs, gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);

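/*
 * A minimal write-path sketch (hypothetical caller; my_fs_submit_write() is
 * an assumed helper, not part of this API): encrypt a full page, write the
 * returned bounce page, then release it.
 *
 *	struct page *cipher_page;
 *
 *	cipher_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *					page->index, GFP_NOFS);
 *	if (IS_ERR(cipher_page))
 *		return PTR_ERR(cipher_page);
 *	my_fs_submit_write(cipher_page);
 *	fscrypt_restore_control_page(cipher_page);
 */
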
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:    The corresponding inode for the page to decrypt.
 * @page:     The page to decrypt. Must be locked in case
 *            it is a writeback page.
 * @len:      Number of bytes in @page to be decrypted.
 * @offs:     Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
			offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

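/*
 * For an in-file example of the read side, see completion_pages() below,
 * which decrypts every page of a completed read bio in-place. A
 * block-granular call (hypothetical values for the length, offset and
 * logical block) would look like:
 *
 *	err = fscrypt_decrypt_page(inode, page, blocksize,
 *				blk_offset_in_page, lblk);
 */
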
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

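/*
 * Note on fscrypt_zeroout_range(): zeroing a range of an encrypted file on
 * disk cannot be done by writing literal zeroes, because each block's
 * ciphertext depends on its logical block number. The loop above therefore
 * encrypts ZERO_PAGE(0) once per block with that block's tweak and writes
 * the resulting ciphertext out synchronously, one block at a time.
 */
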
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

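/*
 * A minimal sketch of how a filesystem hooks these operations up in its
 * lookup path (hypothetical caller; d_set_d_op() is the standard VFS
 * helper):
 *
 *	if (dir->i_sb->s_cop->is_encrypted(dir))
 *		d_set_d_op(dentry, &fscrypt_d_ops);
 */
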
/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

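/*
 * A minimal read-completion sketch (hypothetical bio endio handler; the ctx
 * is assumed to have been stashed in bi_private at submission time):
 *
 *	static void my_fs_read_endio(struct bio *bio)
 *	{
 *		struct fscrypt_ctx *ctx = bio->bi_private;
 *
 *		if (!bio->bi_error)
 *			fscrypt_decrypt_bio_pages(ctx, bio);
 *		...
 *	}
 *
 * Decryption then runs asynchronously on fscrypt_read_workqueue, and
 * completion_pages() unlocks the pages and drops the bio reference.
 */
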
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped; a mapped page can't be one. */
	if ((*page)->mapping)
		return;

	/* The page is an unmapped bounce page. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

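/*
 * Note on the write-path pairing: fscrypt_encrypt_page() (without
 * FS_CFLG_INPLACE_ENCRYPTION) returns a locked bounce page that carries its
 * fscrypt_ctx in page_private. After writeback completes, the caller either
 * calls fscrypt_restore_control_page() on the bounce page directly, or uses
 * fscrypt_pullback_bio_page() to swap the original page back into a bio
 * (with restore == true to also free the bounce page and the context).
 */
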
static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

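/*
 * fscrypt_initialize() above uses a double-checked pattern: the unlocked
 * read of fscrypt_bounce_page_pool is a fast path for the common case where
 * the buffers already exist, and the check is repeated under
 * fscrypt_init_mutex before anything is allocated, so concurrent first
 * accesses to encrypted files initialize the pools exactly once.
 */
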
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");