/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

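/*
 * Illustrative note (added commentary, not in the original file): with
 * 0444 permissions these parameters are read-only at runtime but can be
 * inspected under /sys/module/ext4/parameters/, e.g.:
 *
 *	cat /sys/module/ext4/parameters/num_prealloc_crypto_pages
 *
 * When ext4 is built as a module they can also be set at load time:
 *
 *	modprobe ext4 num_prealloc_crypto_ctxs=256
 */
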
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value on failure.  This function never returns NULL.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}

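/*
 * Illustrative sketch (added commentary, not in the original file): the
 * expected calling pattern is get -> use -> release, e.g. on the read
 * path:
 *
 *	ctx = ext4_get_crypto_ctx(inode);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	res = ext4_decrypt(ctx, page);
 *	ext4_release_crypto_ctx(ctx);
 *
 * ext4_decrypt_one() below wraps exactly this sequence.
 */
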
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shut down the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}

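/*
 * Illustrative sketch (added commentary, not in the original file):
 * because initialization is lazy and the crypto_init mutex serializes
 * concurrent callers, users need only do
 *
 *	res = ext4_init_crypto();
 *	if (res)
 *		return res;
 *
 * before their first call to ext4_get_crypto_ctx(); repeat calls return
 * 0 quickly via the already_initialized path.
 */
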
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

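/*
 * Note (added commentary): ext4_restore_control_page() undoes the
 * SetPagePrivate()/set_page_private() pairing performed at the end of
 * ext4_encrypt(), using the bounce page's ->private field to find the
 * ctx that owns it.
 */
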
/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

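/*
 * Note (added commentary): -EINPROGRESS here is the crypto layer's
 * notification that a backlogged request has started processing, not
 * the final result; the callback is invoked again with the real status,
 * which completes ecr->completion and wakes the waiter in
 * ext4_page_crypto().
 */
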
typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
			    struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: ablkcipher_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_%scrypt() returned %d\n",
			__func__, (rw == EXT4_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}

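/*
 * Worked example (added commentary): for page index 5 on a little-endian
 * machine with a 64-bit pgoff_t, the 16-byte XTS tweak built above is
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * Note that the raw memcpy() of the index makes the ciphertext format
 * depend on host endianness and sizeof(pgoff_t).
 */
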
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

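/*
 * Note (added commentary): GFP_NOWAIT means mempool_alloc() above will
 * not sleep; it tries the page allocator first and then falls back to
 * the num_prealloc_crypto_pages preallocated pages, failing only when
 * both are exhausted.
 */
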
/**
 * ext4_encrypt() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR() value on failure.  This function never returns NULL.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}

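/*
 * Illustrative sketch (added commentary; io_submit_add_page() is a
 * hypothetical name): a writeback path submits the returned bounce page
 * in place of the original, then releases it on I/O completion:
 *
 *	data_page = ext4_encrypt(inode, page);
 *	if (IS_ERR(data_page))
 *		goto fail;
 *	io_submit_add_page(io, data_page);	// write ciphertext to disk
 *	...
 *	ext4_restore_control_page(data_page);	// from the bio end_io path
 */
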
/**
 * ext4_decrypt() - Decrypts a page in-place
 * @ctx: The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(ctx, page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
	int ret;

	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = ext4_decrypt(ctx, page);
	ext4_release_crypto_ctx(ctx);
	return ret;
}

int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	/* ee_block is stored little-endian on disk */
	ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t pblk = ext4_ext_pblock(ex);
	unsigned int len = ext4_ext_get_actual_len(ex);
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		/* bi_sector is in 512-byte units, not filesystem blocks */
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		/* bio_add_page() returns the number of bytes added */
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
		/* advance to the next block of the extent */
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

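/*
 * Worked example (added commentary): with 4 KiB blocks,
 * s_blocksize_bits is 12, so filesystem block pblk maps to 512-byte
 * sector pblk << 3; e.g. block 100 starts at sector 800.
 */
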
bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
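
/*
 * Example (added commentary): AES-256-XTS uses two 256-bit keys, so the
 * only valid size for EXT4_ENCRYPTION_MODE_AES_256_XTS is 64 bytes:
 * ext4_validate_encryption_key_size(EXT4_ENCRYPTION_MODE_AES_256_XTS, 64)
 * returns 64, while passing 32 returns 0.
 */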