// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

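/*
 * Decrypt, in place, the data pages of a completed read bio.  Pages of an
 * inode using hardware (inline) encryption are assumed to have been
 * decrypted already by the storage layer and are simply marked up to date.
 * Otherwise each page is decrypted with fscrypt_decrypt_page(); a failure
 * marks the page with an error.  When @done is true, successfully
 * decrypted pages are also marked up to date and all pages are unlocked.
 */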
static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (fscrypt_using_hardware_encryption(page->mapping->host)) {
			SetPageUptodate(page);
		} else {
			int ret = fscrypt_decrypt_page(page->mapping->host,
					page, PAGE_SIZE, 0, page->index);

			if (ret) {
				WARN_ON_ONCE(1);
				SetPageError(page);
			} else if (done) {
				SetPageUptodate(page);
			}
		}
		if (done)
			unlock_page(page);
	}
}

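/*
 * fscrypt_decrypt_bio() - decrypt a read bio's pages in the caller's
 * context, leaving the pages locked for the caller to deal with.
 */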
void fscrypt_decrypt_bio(struct bio *bio)
{
	__fscrypt_decrypt_bio(bio, false);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);

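/*
 * Work function for fscrypt_enqueue_decrypt_bio(): decrypts the bio's
 * pages, marks them up to date and unlocks them, then releases the
 * completion context and drops the bio reference.
 */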
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;

	__fscrypt_decrypt_bio(bio, true);
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

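/*
 * fscrypt_enqueue_decrypt_bio() - hand a read bio off to the fscrypt
 * workqueue for decryption.  Software decryption may sleep, so it cannot
 * be done directly in the bio completion path, which typically runs in
 * atomic context.
 */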
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	fscrypt_enqueue_decrypt_work(&ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);

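/*
 * fscrypt_pullback_bio_page() - if *page is a bounce page holding
 * ciphertext for writeback, replace it with the original pagecache
 * ("control") page it was encrypted from.  Bounce pages are recognized
 * by having no mapping.  If @restore is true, the bounce page and its
 * encryption context are also freed.
 */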
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

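/*
 * fscrypt_zeroout_range() - zero out @len blocks of an encrypted file,
 * starting at logical block @lblk and physical block @pblk.  Since the
 * file is encrypted, each on-disk block must contain the encryption of a
 * zero block at that logical block number, so this encrypts ZERO_PAGE(0)
 * into a bounce page and writes it out synchronously, one block at a
 * time.  Only supports the case where the filesystem block size equals
 * PAGE_SIZE.
 */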
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);