blob: 85747b7f91a91894d4902636d5145dc957184df3 [file] [log] [blame]
Sage Weil8b6e4f22010-02-02 16:07:07 -08001
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -07002#include <linux/ceph/ceph_debug.h>
Sage Weil8b6e4f22010-02-02 16:07:07 -08003
4#include <linux/err.h>
5#include <linux/scatterlist.h>
Ilya Dryomov7fea24c2017-01-16 14:35:17 +01006#include <linux/sched.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09007#include <linux/slab.h>
Herbert Xue59dd982016-01-24 21:18:40 +08008#include <crypto/aes.h>
9#include <crypto/skcipher.h>
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -070010#include <linux/key-type.h>
Sage Weil8b6e4f22010-02-02 16:07:07 -080011
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -070012#include <keys/ceph-type.h>
David Howells7c3bec02014-07-18 18:56:35 +010013#include <keys/user-type.h>
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070014#include <linux/ceph/decode.h>
Sage Weil8b6e4f22010-02-02 16:07:07 -080015#include "crypto.h"
Sage Weil8b6e4f22010-02-02 16:07:07 -080016
Ilya Dryomov7af3ea12016-12-02 16:35:08 +010017/*
18 * Set ->key and ->tfm. The rest of the key should be filled in before
19 * this function is called.
20 */
21static int set_secret(struct ceph_crypto_key *key, void *buf)
22{
23 unsigned int noio_flag;
24 int ret;
25
26 key->key = NULL;
27 key->tfm = NULL;
28
29 switch (key->type) {
30 case CEPH_CRYPTO_NONE:
31 return 0; /* nothing to do */
32 case CEPH_CRYPTO_AES:
33 break;
34 default:
35 return -ENOTSUPP;
36 }
37
38 WARN_ON(!key->len);
39 key->key = kmemdup(buf, key->len, GFP_NOIO);
40 if (!key->key) {
41 ret = -ENOMEM;
42 goto fail;
43 }
44
45 /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
46 noio_flag = memalloc_noio_save();
47 key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
48 memalloc_noio_restore(noio_flag);
49 if (IS_ERR(key->tfm)) {
50 ret = PTR_ERR(key->tfm);
51 key->tfm = NULL;
52 goto fail;
53 }
54
55 ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
56 if (ret)
57 goto fail;
58
59 return 0;
60
61fail:
62 ceph_crypto_key_destroy(key);
63 return ret;
64}
65
Tommi Virtanen8323c3a2011-03-25 16:32:57 -070066int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
67 const struct ceph_crypto_key *src)
68{
69 memcpy(dst, src, sizeof(struct ceph_crypto_key));
Ilya Dryomov7af3ea12016-12-02 16:35:08 +010070 return set_secret(dst, src->key);
Tommi Virtanen8323c3a2011-03-25 16:32:57 -070071}
72
Sage Weil8b6e4f22010-02-02 16:07:07 -080073int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
74{
75 if (*p + sizeof(u16) + sizeof(key->created) +
76 sizeof(u16) + key->len > end)
77 return -ERANGE;
78 ceph_encode_16(p, key->type);
79 ceph_encode_copy(p, &key->created, sizeof(key->created));
80 ceph_encode_16(p, key->len);
81 ceph_encode_copy(p, key->key, key->len);
82 return 0;
83}
84
/*
 * Decode a wire-format key from *p (16-bit type, created timestamp,
 * 16-bit length, raw secret) and install the secret via set_secret().
 * Advances *p past the consumed bytes; returns -EINVAL on short input.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	int ret;

	/* ceph_decode_need() jumps to the bad label if the buffer is short */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	ret = set_secret(key, *p);
	*p += key->len;		/* consume the secret bytes even on failure */
	return ret;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
102
103int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
104{
105 int inlen = strlen(inkey);
106 int blen = inlen * 3 / 4;
107 void *buf, *p;
108 int ret;
109
110 dout("crypto_key_unarmor %s\n", inkey);
111 buf = kmalloc(blen, GFP_NOFS);
112 if (!buf)
113 return -ENOMEM;
114 blen = ceph_unarmor(buf, inkey, inkey+inlen);
115 if (blen < 0) {
116 kfree(buf);
117 return blen;
118 }
119
120 p = buf;
121 ret = ceph_crypto_key_decode(key, &p, p + blen);
122 kfree(buf);
123 if (ret)
124 return ret;
125 dout("crypto_key_unarmor key %p type %d len %d\n", key,
126 key->type, key->len);
127 return 0;
128}
129
Ilya Dryomov6db23042016-12-02 16:35:08 +0100130void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
131{
132 if (key) {
133 kfree(key->key);
134 key->key = NULL;
Ilya Dryomov7af3ea12016-12-02 16:35:08 +0100135 crypto_free_skcipher(key->tfm);
136 key->tfm = NULL;
Ilya Dryomov6db23042016-12-02 16:35:08 +0100137 }
138}
139
/* fixed initialization vector for AES-CBC; value comes from CEPH_AES_IV */
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
Sage Weil8b6e4f22010-02-02 16:07:07 -0800141
/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		/* zero @sgt so a later teardown_sgtable() is a no-op */
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		/*
		 * vmalloc memory is only virtually contiguous --
		 * one sg entry per page
		 */
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		/* single chunk: use the caller's sg, nothing to free later */
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		/* only the first chunk may start at a non-zero page offset */
		off = 0;
		buf += len;
		buf_len -= len;
	}
	/* the chunks must have covered the buffer exactly */
	WARN_ON(buf_len != 0);

	return 0;
}
205
206static void teardown_sgtable(struct sg_table *sgt)
207{
208 if (sgt->orig_nents > 1)
209 sg_free_table(sgt);
210}
211
/*
 * AES-CBC encrypt or decrypt @buf in place.
 *
 * @buf_len:  usable size of @buf -- when encrypting it must have room
 *            for up to AES_BLOCK_SIZE bytes of padding past @in_len
 * @in_len:   length of the input data
 * @pout_len: on success, set to the length of the output data
 *
 * Padding scheme: on encrypt, pad_byte copies of the value pad_byte
 * are appended; on decrypt, the last byte gives the pad length to
 * strip and is validated to be in (0, AES_BLOCK_SIZE].
 */
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
			  void *buf, int buf_len, int in_len, int *pout_len)
{
	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	char iv[AES_BLOCK_SIZE] __aligned(8);
	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
	int crypt_len = encrypt ? in_len + pad_byte : in_len;
	int ret;

	WARN_ON(crypt_len > buf_len);
	if (encrypt)
		memset(buf + in_len, pad_byte, pad_byte);
	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
	if (ret)
		return ret;

	/* iv is modified by the cipher, so work on a local copy */
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(req, key->tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

	/*
	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
		       key->key, key->len, 1);
	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/
	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);	/* wipe request state off the stack */
	if (ret) {
		pr_err("%s %scrypt failed: %d\n", __func__,
		       encrypt ? "en" : "de", ret);
		goto out_sgt;
	}
	/*
	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/

	if (encrypt) {
		*pout_len = crypt_len;
	} else {
		/* validate the trailing pad byte before stripping it */
		pad_byte = *(char *)(buf + in_len - 1);
		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
		    in_len >= pad_byte) {
			*pout_len = in_len - pad_byte;
		} else {
			pr_err("%s got bad padding %d on in_len %d\n",
			       __func__, pad_byte, in_len);
			ret = -EPERM;
			goto out_sgt;
		}
	}

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}
275
276int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
277 void *buf, int buf_len, int in_len, int *pout_len)
278{
279 switch (key->type) {
280 case CEPH_CRYPTO_NONE:
281 *pout_len = in_len;
282 return 0;
283 case CEPH_CRYPTO_AES:
284 return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
285 pout_len);
286 default:
287 return -ENOTSUPP;
288 }
289}
290
David Howellsefa64c02014-07-18 18:56:35 +0100291static int ceph_key_preparse(struct key_preparsed_payload *prep)
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700292{
293 struct ceph_crypto_key *ckey;
David Howellscf7f6012012-09-13 13:06:29 +0100294 size_t datalen = prep->datalen;
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700295 int ret;
296 void *p;
297
298 ret = -EINVAL;
David Howellscf7f6012012-09-13 13:06:29 +0100299 if (datalen <= 0 || datalen > 32767 || !prep->data)
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700300 goto err;
301
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700302 ret = -ENOMEM;
303 ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
304 if (!ckey)
305 goto err;
306
307 /* TODO ceph_crypto_key_decode should really take const input */
David Howellscf7f6012012-09-13 13:06:29 +0100308 p = (void *)prep->data;
309 ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700310 if (ret < 0)
311 goto err_ckey;
312
David Howells146aa8b2015-10-21 14:04:48 +0100313 prep->payload.data[0] = ckey;
David Howellsefa64c02014-07-18 18:56:35 +0100314 prep->quotalen = datalen;
Tommi Virtanen4b2a58a2011-03-28 14:59:38 -0700315 return 0;
316
317err_ckey:
318 kfree(ckey);
319err:
320 return ret;
321}
322
David Howellsefa64c02014-07-18 18:56:35 +0100323static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
324{
David Howells146aa8b2015-10-21 14:04:48 +0100325 struct ceph_crypto_key *ckey = prep->payload.data[0];
David Howellsefa64c02014-07-18 18:56:35 +0100326 ceph_crypto_key_destroy(ckey);
327 kfree(ckey);
328}
329
/* keyring .destroy: free the decoded key attached to an instantiated key */
static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}
337
/*
 * "ceph" key type for the kernel keyring; payload data[0] holds the
 * struct ceph_crypto_key produced by ceph_key_preparse().
 */
struct key_type key_type_ceph = {
	.name = "ceph",
	.preparse = ceph_key_preparse,
	.free_preparse = ceph_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.destroy = ceph_key_destroy,
};
345
346int ceph_crypto_init(void) {
347 return register_key_type(&key_type_ceph);
348}
349
350void ceph_crypto_shutdown(void) {
351 unregister_key_type(&key_type_ceph);
352}