/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}
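
/*
 * Worked example: with an 8-byte DES block and 13 bytes of data past
 * @offset, gss_krb5_padding(8, 13) returns 8 - (13 % 8) == 3, so three
 * pad bytes of value 0x03 are appended below.  Data already ending on a
 * block boundary still gets a full block of padding (pad == blocksize),
 * giving the self-describing 1..blocksize-octet padding of RFC 1964,
 * section 1.2.2.3.  The pad lands at the end of the tail kvec when page
 * or tail data is present, otherwise at the end of the head; the caller
 * is assumed to have left room there.
 */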
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
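
/*
 * The pad count is read from the very last byte of the buffer, which
 * may live in the head, the page data, or the tail depending on how
 * far buf->len reaches into each segment.
 */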
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}
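
/*
 * The confounder length is taken from the enctype table
 * (kctx->gk5e->conflen); for the enctypes handled in this file it is
 * expected to be 8 octets (DES, DES3, RC4) or 16 octets (AES),
 * matching the two cases in the switch below.
 */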
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;
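
	/*
	 * The token header now matches RFC 1964, section 1.2.1:
	 * TOK_ID 0x02 0x01 (KG_TOK_WRAP_MSG), SGN_ALG, SEAL_ALG and
	 * 0xff 0xff filler, to be followed by the SND_SEQ and SGN_CKSUM
	 * fields filled in below.
	 */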

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);
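
	/*
	 * krb5_seq_lock serializes sequence-number allocation, so
	 * concurrent wrap calls on the same context never emit duplicate
	 * SND_SEQ values.
	 */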

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;
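
	/*
	 * RC4 derives a fresh per-message encryption key from the
	 * sequence number (krb5_rc4_setup_enc_key), so it needs its own
	 * cipher transform here; the DES variants reuse kctx->enc.
	 */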
	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need plaintext seqnum to derive encryption key for arcfour-hmac
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;
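
	/*
	 * The direction octet is 0 for tokens sent by the context
	 * initiator and 0xff for tokens sent by the acceptor; reject
	 * tokens that claim to come from our own side.
	 */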
	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}
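
/*
 * Example: a ten-byte buffer holding "0123456789", rotated a little by
 * shift == 3, becomes "3456789012": the first three bytes are saved in
 * head[], the remainder slides down LOCAL_BUF_LEN bytes at a time, and
 * the saved bytes are written back at the end.
 */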

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}
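
/*
 * On unwrap, rotating left by the token's RRC undoes the sender's
 * right-rotation of the encrypted payload (RFC 4121, section 4.2.5,
 * the Right Rotation Count).
 */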

static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int		blocksize;
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);
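
	/*
	 * The 16-byte header now matches RFC 4121, section 4.2.6.2:
	 * TOK_ID 0x05 0x04 (KG2_TOK_WRAP), Flags, 0xff filler, EC, RRC
	 * and the 64-bit SND_SEQ.  This implementation always sends
	 * EC == 0 and RRC == 0.
	 */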

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */
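
	/*
	 * Undo the sender's right-rotation of the data that follows the
	 * 16-byte token header (hence offset + GSS_KRB5_TOK_HDR_LEN,
	 * i.e. offset + 16).
	 */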
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}
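
/*
 * Dispatch by enctype: the DES, DES3 and RC4 contexts use the RFC
 * 1964-style v1 tokens above; the AES contexts use RFC 4121 v2 tokens.
 */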

u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}
626