blob: 46008d5ac504cdc2c4392433dd566a8d0ed4a330 [file] [log] [blame]
Sage Weil8b6e4f22010-02-02 16:07:07 -08001
#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memzero_explicit() */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
Sage Weil8b6e4f22010-02-02 16:07:07 -080017
Ilya Dryomov7af3ea12016-12-02 16:35:08 +010018/*
19 * Set ->key and ->tfm. The rest of the key should be filled in before
20 * this function is called.
21 */
22static int set_secret(struct ceph_crypto_key *key, void *buf)
23{
24 unsigned int noio_flag;
25 int ret;
26
27 key->key = NULL;
28 key->tfm = NULL;
29
30 switch (key->type) {
31 case CEPH_CRYPTO_NONE:
32 return 0; /* nothing to do */
33 case CEPH_CRYPTO_AES:
34 break;
35 default:
36 return -ENOTSUPP;
37 }
38
39 WARN_ON(!key->len);
40 key->key = kmemdup(buf, key->len, GFP_NOIO);
41 if (!key->key) {
42 ret = -ENOMEM;
43 goto fail;
44 }
45
46 /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
47 noio_flag = memalloc_noio_save();
48 key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
49 memalloc_noio_restore(noio_flag);
50 if (IS_ERR(key->tfm)) {
51 ret = PTR_ERR(key->tfm);
52 key->tfm = NULL;
53 goto fail;
54 }
55
56 ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
57 if (ret)
58 goto fail;
59
60 return 0;
61
62fail:
63 ceph_crypto_key_destroy(key);
64 return ret;
65}
66
Tommi Virtanen8323c3a2011-03-25 16:32:57 -070067int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
68 const struct ceph_crypto_key *src)
69{
70 memcpy(dst, src, sizeof(struct ceph_crypto_key));
Ilya Dryomov7af3ea12016-12-02 16:35:08 +010071 return set_secret(dst, src->key);
Tommi Virtanen8323c3a2011-03-25 16:32:57 -070072}
73
Sage Weil8b6e4f22010-02-02 16:07:07 -080074int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
75{
76 if (*p + sizeof(u16) + sizeof(key->created) +
77 sizeof(u16) + key->len > end)
78 return -ERANGE;
79 ceph_encode_16(p, key->type);
80 ceph_encode_copy(p, &key->created, sizeof(key->created));
81 ceph_encode_16(p, key->len);
82 ceph_encode_copy(p, key->key, key->len);
83 return 0;
84}
85
/*
 * Parse a wire-encoded key from *@p (bounded by @end) into @key and
 * set up its secret via set_secret().  *@p is advanced past the key
 * bytes even if set_secret() fails.  Wire format matches
 * ceph_crypto_key_encode().  Returns -EINVAL on a short buffer.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	int ret;

	/* ceph_decode_need() jumps to "bad" if fewer bytes remain */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	ret = set_secret(key, *p);
	*p += key->len;
	return ret;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
103
104int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
105{
106 int inlen = strlen(inkey);
107 int blen = inlen * 3 / 4;
108 void *buf, *p;
109 int ret;
110
111 dout("crypto_key_unarmor %s\n", inkey);
112 buf = kmalloc(blen, GFP_NOFS);
113 if (!buf)
114 return -ENOMEM;
115 blen = ceph_unarmor(buf, inkey, inkey+inlen);
116 if (blen < 0) {
117 kfree(buf);
118 return blen;
119 }
120
121 p = buf;
122 ret = ceph_crypto_key_decode(key, &p, p + blen);
123 kfree(buf);
124 if (ret)
125 return ret;
126 dout("crypto_key_unarmor key %p type %d len %d\n", key,
127 key->type, key->len);
128 return 0;
129}
130
Ilya Dryomov6db23042016-12-02 16:35:08 +0100131void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
132{
133 if (key) {
134 kfree(key->key);
135 key->key = NULL;
Ilya Dryomov7af3ea12016-12-02 16:35:08 +0100136 crypto_free_skcipher(key->tfm);
137 key->tfm = NULL;
Ilya Dryomov6db23042016-12-02 16:35:08 +0100138 }
139}
140
/* fixed CBC IV mandated by the ceph on-wire protocol (CEPH_AES_IV) */
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
Sage Weil8b6e4f22010-02-02 16:07:07 -0800142
/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		/* caller must not feed an empty table to the cipher */
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	/*
	 * vmalloc memory is only virtually contiguous: each page needs
	 * its own sg entry.  kmalloc memory is physically contiguous
	 * and one entry covers the whole buffer.
	 */
	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		/* single entry: use the caller's sg, no allocation */
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		/* only the first chunk may start at a non-zero offset */
		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}
206
207static void teardown_sgtable(struct sg_table *sgt)
208{
209 if (sgt->orig_nents > 1)
210 sg_free_table(sgt);
211}
212
/*
 * Encrypt or decrypt @in_len bytes of @buf in place with AES-CBC.
 *
 * On encrypt, 1..AES_BLOCK_SIZE padding bytes (each holding the pad
 * length) are appended, so @buf_len must leave room for them.  On
 * decrypt, the padding is validated and stripped.  The resulting
 * length is returned through @pout_len.
 */
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
			  void *buf, int buf_len, int in_len, int *pout_len)
{
	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	char iv[AES_BLOCK_SIZE] __aligned(8);
	/* pad value doubles as pad length; always 1..AES_BLOCK_SIZE */
	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
	int crypt_len = encrypt ? in_len + pad_byte : in_len;
	int ret;

	WARN_ON(crypt_len > buf_len);
	if (encrypt)
		memset(buf + in_len, pad_byte, pad_byte);
	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
	if (ret)
		return ret;

	/* fixed protocol IV; copied because the cipher updates it */
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(req, key->tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

	/*
	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
		       key->key, key->len, 1);
	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/
	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);	/* wipe request state off the stack */
	if (ret) {
		pr_err("%s %scrypt failed: %d\n", __func__,
		       encrypt ? "en" : "de", ret);
		goto out_sgt;
	}
	/*
	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/

	if (encrypt) {
		*pout_len = crypt_len;
	} else {
		/* last plaintext byte is the pad length */
		pad_byte = *(char *)(buf + in_len - 1);
		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
		    in_len >= pad_byte) {
			*pout_len = in_len - pad_byte;
		} else {
			pr_err("%s got bad padding %d on in_len %d\n",
			       __func__, pad_byte, in_len);
			ret = -EPERM;
			goto out_sgt;
		}
	}

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}
276
277int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
278 void *buf, int buf_len, int in_len, int *pout_len)
279{
280 switch (key->type) {
281 case CEPH_CRYPTO_NONE:
282 *pout_len = in_len;
283 return 0;
284 case CEPH_CRYPTO_AES:
285 return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
286 pout_len);
287 default:
288 return -ENOTSUPP;
289 }
290}
291
David Howellsefa64c02014-07-18 18:56:35 +0100292static int ceph_key_preparse(struct key_preparsed_payload *prep)
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700293{
294 struct ceph_crypto_key *ckey;
David Howellscf7f6012012-09-13 13:06:29 +0100295 size_t datalen = prep->datalen;
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700296 int ret;
297 void *p;
298
299 ret = -EINVAL;
David Howellscf7f6012012-09-13 13:06:29 +0100300 if (datalen <= 0 || datalen > 32767 || !prep->data)
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700301 goto err;
302
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700303 ret = -ENOMEM;
304 ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
305 if (!ckey)
306 goto err;
307
308 /* TODO ceph_crypto_key_decode should really take const input */
David Howellscf7f6012012-09-13 13:06:29 +0100309 p = (void *)prep->data;
310 ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700311 if (ret < 0)
312 goto err_ckey;
313
David Howells146aa8b2015-10-21 14:04:48 +0100314 prep->payload.data[0] = ckey;
David Howellsefa64c02014-07-18 18:56:35 +0100315 prep->quotalen = datalen;
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700316 return 0;
317
318err_ckey:
319 kfree(ckey);
320err:
321 return ret;
322}
323
David Howellsefa64c02014-07-18 18:56:35 +0100324static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
325{
David Howells146aa8b2015-10-21 14:04:48 +0100326 struct ceph_crypto_key *ckey = prep->payload.data[0];
David Howellsefa64c02014-07-18 18:56:35 +0100327 ceph_crypto_key_destroy(ckey);
328 kfree(ckey);
329}
330
David Howellsefa64c02014-07-18 18:56:35 +0100331static void ceph_key_destroy(struct key *key)
332{
David Howells146aa8b2015-10-21 14:04:48 +0100333 struct ceph_crypto_key *ckey = key->payload.data[0];
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700334
335 ceph_crypto_key_destroy(ckey);
Sylvain Munautf0666b12012-08-02 09:12:59 -0700336 kfree(ckey);
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700337}
338
/*
 * Keyring key type "ceph": payload is a wire-encoded crypto key
 * parsed by ceph_crypto_key_decode() (via ceph_key_preparse()).
 */
struct key_type key_type_ceph = {
	.name = "ceph",
	.preparse = ceph_key_preparse,
	.free_preparse = ceph_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.destroy = ceph_key_destroy,
};
346
347int ceph_crypto_init(void) {
348 return register_key_type(&key_type_ceph);
349}
350
351void ceph_crypto_shutdown(void) {
352 unregister_key_type(&key_type_ceph);
353}