blob: ca52ac28a5379cf4783ec126b90a0a454b7ad719 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/net/sunrpc/gss_krb5_crypto.c
3 *
Kevin Coffman81d4a432010-03-17 13:02:51 -04004 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * All rights reserved.
6 *
7 * Andy Adamson <andros@umich.edu>
8 * Bruce Fields <bfields@umich.edu>
9 */
10
11/*
12 * Copyright (C) 1998 by the FundsXpress, INC.
13 *
14 * All rights reserved.
15 *
16 * Export of this software from the United States of America may require
17 * a specific license from the United States Government. It is the
18 * responsibility of any person or organization contemplating export to
19 * obtain such a license before exporting.
20 *
21 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
22 * distribute this software and its documentation for any purpose and
23 * without fee is hereby granted, provided that the above copyright
24 * notice appear in all copies and that both that copyright notice and
25 * this permission notice appear in supporting documentation, and that
26 * the name of FundsXpress. not be used in advertising or publicity pertaining
27 * to distribution of the software without specific, written prior
28 * permission. FundsXpress makes no representations about the suitability of
29 * this software for any purpose. It is provided "as is" without express
30 * or implied warranty.
31 *
32 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
33 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
35 */
36
Herbert Xu35058682006-08-24 19:10:20 +100037#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/types.h>
39#include <linux/mm.h>
David Hardeman378f0582005-09-17 17:55:31 +100040#include <linux/scatterlist.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/crypto.h>
42#include <linux/highmem.h>
43#include <linux/pagemap.h>
44#include <linux/sunrpc/gss_krb5.h>
Olga Kornievskaia37a4e6c2006-12-04 20:22:33 -050045#include <linux/sunrpc/xdr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#ifdef RPC_DEBUG
48# define RPCDBG_FACILITY RPCDBG_AUTH
49#endif
50
51u32
52krb5_encrypt(
Herbert Xu378c6692006-08-22 20:33:54 +100053 struct crypto_blkcipher *tfm,
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 void * iv,
55 void * in,
56 void * out,
57 int length)
58{
59 u32 ret = -EINVAL;
YOSHIFUJI Hideakicca51722007-02-09 15:38:13 -080060 struct scatterlist sg[1];
Kevin Coffman81d4a432010-03-17 13:02:51 -040061 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
Herbert Xu378c6692006-08-22 20:33:54 +100062 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Herbert Xu378c6692006-08-22 20:33:54 +100064 if (length % crypto_blkcipher_blocksize(tfm) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070065 goto out;
66
Kevin Coffman81d4a432010-03-17 13:02:51 -040067 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
Kevin Coffman3d4a6882008-02-21 13:44:12 -050068 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
69 crypto_blkcipher_ivsize(tfm));
Linus Torvalds1da177e2005-04-16 15:20:36 -070070 goto out;
71 }
72
73 if (iv)
Herbert Xu378c6692006-08-22 20:33:54 +100074 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
Linus Torvalds1da177e2005-04-16 15:20:36 -070075
76 memcpy(out, in, length);
Herbert Xu68e3f5d2007-10-27 00:52:07 -070077 sg_init_one(sg, out, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
Herbert Xu378c6692006-08-22 20:33:54 +100079 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080out:
Chuck Lever8885cb32007-01-31 12:14:05 -050081 dprintk("RPC: krb5_encrypt returns %d\n", ret);
J. Bruce Fields8fc75002006-12-04 20:22:31 -050082 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070083}
84
Linus Torvalds1da177e2005-04-16 15:20:36 -070085u32
86krb5_decrypt(
Herbert Xu378c6692006-08-22 20:33:54 +100087 struct crypto_blkcipher *tfm,
Linus Torvalds1da177e2005-04-16 15:20:36 -070088 void * iv,
89 void * in,
90 void * out,
91 int length)
92{
93 u32 ret = -EINVAL;
94 struct scatterlist sg[1];
Kevin Coffman81d4a432010-03-17 13:02:51 -040095 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
Herbert Xu378c6692006-08-22 20:33:54 +100096 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
Herbert Xu378c6692006-08-22 20:33:54 +100098 if (length % crypto_blkcipher_blocksize(tfm) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 goto out;
100
Kevin Coffman81d4a432010-03-17 13:02:51 -0400101 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
Kevin Coffman3d4a6882008-02-21 13:44:12 -0500102 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
Herbert Xu378c6692006-08-22 20:33:54 +1000103 crypto_blkcipher_ivsize(tfm));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 goto out;
105 }
106 if (iv)
Herbert Xu378c6692006-08-22 20:33:54 +1000107 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
109 memcpy(out, in, length);
Herbert Xu68e3f5d2007-10-27 00:52:07 -0700110 sg_init_one(sg, out, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111
Herbert Xu378c6692006-08-22 20:33:54 +1000112 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113out:
Chuck Lever8885cb32007-01-31 12:14:05 -0500114 dprintk("RPC: gss_k5decrypt returns %d\n",ret);
J. Bruce Fields8fc75002006-12-04 20:22:31 -0500115 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116}
117
J. Bruce Fieldsf7b3af62005-10-13 16:55:03 -0400118static int
J. Bruce Fieldsf7b3af62005-10-13 16:55:03 -0400119checksummer(struct scatterlist *sg, void *data)
120{
Herbert Xu35058682006-08-24 19:10:20 +1000121 struct hash_desc *desc = data;
J. Bruce Fieldsf7b3af62005-10-13 16:55:03 -0400122
Herbert Xu35058682006-08-24 19:10:20 +1000123 return crypto_hash_update(desc, sg, sg->length);
J. Bruce Fieldsf7b3af62005-10-13 16:55:03 -0400124}
125
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 *
 * Returns 0 on success, GSS_S_FAILURE on any error.  The computed
 * (possibly truncated) checksum and its length are written to *cksumout.
 * cksumkey may be NULL for unkeyed checksum types.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	/* caller must supply a buffer big enough for this enctype */
	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	checksumlen = crypto_hash_digestsize(desc.tfm);

	/* key the hash only for keyed checksum types */
	if (cksumkey != NULL) {
		err = crypto_hash_setkey(desc.tfm, cksumkey,
					 kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	/* digest order: token header first, then the message body */
	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	/* per-enctype post-processing of the raw digest */
	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		/* encrypt the digest with the sequence-number key, then
		 * keep only the trailing cksumlength bytes */
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		/* truncate the HMAC to cksumlength bytes */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		/* unsupported checksum type for this v1 path */
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
199
Kevin Coffmande9c17e2010-03-17 13:02:59 -0400200/*
201 * checksum the plaintext data and hdrlen bytes of the token header
202 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
203 * body then over the first 16 octets of the MIC token
204 * Inclusion of the header data in the calculation of the
205 * checksum is optional.
206 */
207u32
208make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
209 struct xdr_buf *body, int body_offset, u8 *cksumkey,
210 struct xdr_netobj *cksumout)
211{
212 struct hash_desc desc;
213 struct scatterlist sg[1];
214 int err;
215 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
216 unsigned int checksumlen;
217
218 if (kctx->gk5e->keyed_cksum == 0) {
219 dprintk("%s: expected keyed hash for %s\n",
220 __func__, kctx->gk5e->name);
221 return GSS_S_FAILURE;
222 }
223 if (cksumkey == NULL) {
224 dprintk("%s: no key supplied for %s\n",
225 __func__, kctx->gk5e->name);
226 return GSS_S_FAILURE;
227 }
228
229 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
230 CRYPTO_ALG_ASYNC);
231 if (IS_ERR(desc.tfm))
232 return GSS_S_FAILURE;
233 checksumlen = crypto_hash_digestsize(desc.tfm);
234 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
235
236 err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
237 if (err)
238 goto out;
239
240 err = crypto_hash_init(&desc);
241 if (err)
242 goto out;
243 err = xdr_process_buf(body, body_offset, body->len - body_offset,
244 checksummer, &desc);
245 if (err)
246 goto out;
247 if (header != NULL) {
248 sg_init_one(sg, header, hdrlen);
249 err = crypto_hash_update(&desc, sg, hdrlen);
250 if (err)
251 goto out;
252 }
253 err = crypto_hash_final(&desc, checksumdata);
254 if (err)
255 goto out;
256
257 cksumout->len = kctx->gk5e->cksumlength;
258
259 switch (kctx->gk5e->ctype) {
260 case CKSUMTYPE_HMAC_SHA1_96_AES128:
261 case CKSUMTYPE_HMAC_SHA1_96_AES256:
262 /* note that this truncates the hash */
263 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
264 break;
265 default:
266 BUG();
267 break;
268 }
269out:
270 crypto_free_hash(desc.tfm);
271 return err ? GSS_S_FAILURE : 0;
272}
273
/*
 * Per-traversal state threaded through encryptor() by
 * gss_encrypt_xdr_buf() while CBC-encrypting an xdr_buf in place.
 */
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];	/* chaining value carried across calls */
	struct blkcipher_desc desc;	/* cipher handle; .info points at iv */
	int pos;			/* byte offset reached within the xdr_buf */
	struct xdr_buf *outbuf;		/* destination buffer (encrypted in place) */
	struct page **pages;		/* replacement source pages for page data */
	struct scatterlist infrags[4];	/* accumulated input fragments */
	struct scatterlist outfrags[4];	/* accumulated output fragments */
	int fragno;			/* fragments accumulated so far (max 4) */
	int fraglen;			/* bytes accumulated; residue < blocksize
					 * is carried over after each encrypt */
};
285
/*
 * xdr_process_buf() callback: accumulate scatterlist fragments and
 * CBC-encrypt every whole-block span as soon as one is available,
 * carrying any sub-block residue into the next call.
 *
 * Input may come from desc->pages (when the fragment lies within the
 * xdr_buf's page data) while output always goes back to the original
 * page, giving out-of-place encryption for the paged section.
 */
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail. Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	/* does this fragment fall within the buffer's page data? */
	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	/* fraglen = residue that doesn't fill a whole cipher block */
	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	/* nothing block-aligned yet: keep accumulating */
	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	/* reset the tables for the next accumulation round */
	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		/* carry the unencrypted residue as fragment 0 */
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
346
347int
Herbert Xu378c6692006-08-22 20:33:54 +1000348gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
349 int offset, struct page **pages)
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400350{
351 int ret;
352 struct encryptor_desc desc;
353
Herbert Xu378c6692006-08-22 20:33:54 +1000354 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400355
356 memset(desc.iv, 0, sizeof(desc.iv));
Herbert Xu378c6692006-08-22 20:33:54 +1000357 desc.desc.tfm = tfm;
358 desc.desc.info = desc.iv;
359 desc.desc.flags = 0;
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400360 desc.pos = offset;
361 desc.outbuf = buf;
362 desc.pages = pages;
363 desc.fragno = 0;
364 desc.fraglen = 0;
365
Herbert Xu68e3f5d2007-10-27 00:52:07 -0700366 sg_init_table(desc.infrags, 4);
367 sg_init_table(desc.outfrags, 4);
368
Olga Kornievskaia37a4e6c2006-12-04 20:22:33 -0500369 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400370 return ret;
371}
372
/*
 * Per-traversal state threaded through decryptor() by
 * gss_decrypt_xdr_buf() while CBC-decrypting an xdr_buf in place.
 */
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];	/* chaining value carried across calls */
	struct blkcipher_desc desc;	/* cipher handle; .info points at iv */
	struct scatterlist frags[4];	/* accumulated fragments (in == out) */
	int fragno;			/* fragments accumulated so far (max 4) */
	int fraglen;			/* bytes accumulated; residue < blocksize
					 * is carried over after each decrypt */
};
380
/*
 * xdr_process_buf() callback: accumulate scatterlist fragments and
 * CBC-decrypt every whole-block span in place as soon as one is
 * available, carrying any sub-block residue into the next call.
 * Simpler than encryptor(): input and output share the same pages.
 */
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail. Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	/* fraglen = residue that doesn't fill a whole cipher block */
	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	/* nothing block-aligned yet: keep accumulating */
	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	/* reset the table for the next accumulation round */
	sg_init_table(desc->frags, 4);

	if (fraglen) {
		/* carry the undecrypted residue as fragment 0 */
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
422
423int
Herbert Xu378c6692006-08-22 20:33:54 +1000424gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
425 int offset)
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400426{
427 struct decryptor_desc desc;
428
429 /* XXXJBF: */
Herbert Xu378c6692006-08-22 20:33:54 +1000430 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400431
432 memset(desc.iv, 0, sizeof(desc.iv));
Herbert Xu378c6692006-08-22 20:33:54 +1000433 desc.desc.tfm = tfm;
434 desc.desc.info = desc.iv;
435 desc.desc.flags = 0;
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400436 desc.fragno = 0;
437 desc.fraglen = 0;
Herbert Xu68e3f5d2007-10-27 00:52:07 -0700438
439 sg_init_table(desc.frags, 4);
440
Olga Kornievskaia37a4e6c2006-12-04 20:22:33 -0500441 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
J. Bruce Fields14ae1622005-10-13 16:55:13 -0400442}
Kevin Coffman725f2862010-03-17 13:02:46 -0400443
444/*
445 * This function makes the assumption that it was ultimately called
446 * from gss_wrap().
447 *
448 * The client auth_gss code moves any existing tail data into a
449 * separate page before calling gss_wrap.
450 * The server svcauth_gss code ensures that both the head and the
451 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
452 *
453 * Even with that guarantee, this function may be called more than
454 * once in the processing of gss_wrap(). The best we can do is
455 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
456 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
457 * At run-time we can verify that a single invocation of this
458 * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE.
459 */
460
461int
462xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
463{
464 u8 *p;
465
466 if (shiftlen == 0)
467 return 0;
468
469 BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
470 BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
471
472 p = buf->head[0].iov_base + base;
473
474 memmove(p + shiftlen, p, buf->head[0].iov_len - base);
475
476 buf->head[0].iov_len += shiftlen;
477 buf->len += shiftlen;
478
479 return 0;
480}