/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}
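
/*
 * The irqsave spinlock variant is used above because a context can be
 * released from I/O completion (e.g. via ext4_restore_control_page() on
 * the writeback path), which may run in softirq context.
 */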

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success, or
 * an ERR_PTR() value on failure.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
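
/*
 * Typical caller pattern (a sketch for illustration, not code from this
 * file):
 *
 *	ctx = ext4_get_crypto_ctx(inode);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...perform one page-sized crypto operation using ctx...
 *	ext4_release_crypto_ctx(ctx);
 */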

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}
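
/*
 * ext4_exit_crypto() also serves as the error-unwind path for
 * ext4_init_crypto() below, which is why every teardown step here is
 * guarded by a NULL check and resets its pointer afterwards.
 */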

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
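
/*
 * Initialization is lazy and idempotent: the crypto_init mutex and the
 * ext4_read_workqueue check ensure the first access to an encrypted
 * file pays the setup cost exactly once, while later callers fall
 * through to already_initialized and return immediately.
 */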

void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}
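
/*
 * A backlogged request (crypto_skcipher_{en,de}crypt() returning -EBUSY
 * with CRYPTO_TFM_REQ_MAY_BACKLOG set) triggers this callback twice:
 * first with -EINPROGRESS when the request is accepted for processing,
 * then with the final status.  Only the second call may signal the
 * completion that ext4_page_crypto() is waiting on.
 */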

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: skcipher_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				   xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_skcipher_%scrypt() returned %d\n",
			__func__, (rw == EXT4_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}
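
/*
 * Layout of the XTS tweak built in ext4_page_crypto() (an illustration
 * assuming a little-endian machine and a 64-bit pgoff_t): the page
 * index is copied in verbatim and the rest of the 16-byte tweak is
 * zeroed, so page index 3 becomes
 *
 *	03 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * Because the raw in-memory representation of the index is used, the
 * tweak is host-endian.  Tweak values repeat across files, but each
 * inode encrypts with its own key (ci->ci_ctfm), so no (key, tweak)
 * pair is reused.
 */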

static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}
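
/*
 * With GFP_NOWAIT, mempool_alloc() first tries the page allocator
 * without sleeping and only then dips into the preallocated pool,
 * returning NULL rather than blocking if both are exhausted.  This
 * matches the "last resort" strategy described in ext4_get_crypto_ctx().
 */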

/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success, or
 * an ERR_PTR() value on failure.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
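
/*
 * Write-path flow, as implemented above: ext4_encrypt() returns a
 * locked bounce page whose page_private points at the ctx; writeback
 * submits I/O against that bounce page, and the completion path calls
 * ext4_restore_control_page() to unlock it, release the ctx, and
 * recover the original plaintext page through ctx->w.control_page.
 */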

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the encryption context of the page's inode.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}

int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
			   ext4_fsblk_t pblk, ext4_lblk_t len)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
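
/*
 * The bi_sector computation in ext4_encrypted_zeroout() converts a
 * filesystem block number into a 512-byte sector number.  For example,
 * with 4 KiB blocks s_blocksize_bits is 12, the shift is 12 - 9 = 3,
 * and block pblk maps to sector pblk * 8.
 */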

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}