#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

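/*
 * Padding: the plaintext is padded out to a whole number of cipher blocks,
 * and every pad byte holds the pad length, so the receiver only needs to
 * look at the last byte to know how much to strip.  For example, with a
 * blocksize of 8 and 20 bytes of data, gss_krb5_padding() returns 4 and
 * gss_krb5_add_padding() appends 0x04 0x04 0x04 0x04.  Data that is already
 * block-aligned gets a full block of padding.
 */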
static inline int
gss_krb5_padding(int blocksize, int length)
{
        return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
        int padding = gss_krb5_padding(blocksize, buf->len - offset);
        char *p;
        struct kvec *iov;

        if (buf->page_len || buf->tail[0].iov_len)
                iov = &buf->tail[0];
        else
                iov = &buf->head[0];
        p = iov->iov_base + iov->iov_len;
        iov->iov_len += padding;
        buf->len += padding;
        memset(p, padding, padding);
}

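/*
 * An xdr_buf carries its data in three pieces: the head kvec, an array of
 * pages, and the tail kvec, in that order.  To find the final pad byte we
 * walk those pieces in order until the remaining length fits inside one of
 * them, then read the last byte of that piece.
 */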
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
        u8 *ptr;
        u8 pad;
        size_t len = buf->len;

        if (len <= buf->head[0].iov_len) {
                pad = *(u8 *)(buf->head[0].iov_base + len - 1);
                if (pad > buf->head[0].iov_len)
                        return -EINVAL;
                buf->head[0].iov_len -= pad;
                goto out;
        } else
                len -= buf->head[0].iov_len;
        if (len <= buf->page_len) {
                unsigned int last = (buf->page_base + len - 1)
                                        >> PAGE_CACHE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
                                        & (PAGE_CACHE_SIZE - 1);
                ptr = kmap_atomic(buf->pages[last], KM_USER0);
                pad = *(ptr + offset);
                kunmap_atomic(ptr, KM_USER0);
                goto out;
        } else
                len -= buf->page_len;
        BUG_ON(len > buf->tail[0].iov_len);
        pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
        /* XXX: NOTE: we do not adjust the page lengths--they represent
         * a range of data in the real filesystem page cache, and we need
         * to know that range so the xdr code can properly place read data.
         * However adjusting the head length, as we do above, is harmless.
         * In the case of a request that fits into a single page, the server
         * also uses length and head length together to determine the original
         * start of the request to copy the request for deferral; so it's
         * easier on the server if we adjust head and tail length in tandem.
         * It's not really a problem that we don't fool with the page and
         * tail lengths, though--at worst badly formed xdr might lead the
         * server to attempt to parse the padding.
         * XXX: Document all these weird requirements for gss mechanism
         * wrap/unwrap functions. */
        if (pad > blocksize)
                return -EINVAL;
        if (buf->len > pad)
                buf->len -= pad;
        else
                return -EINVAL;
        return 0;
}

static void
make_confounder(char *p, u32 conflen)
{
        static u64 i = 0;
        u64 *q = (u64 *)p;

        /* rfc1964 claims this should be "random".  But all that's really
         * necessary is that it be unique.  And not even that is necessary in
         * our case since our "gssapi" implementation exists only to support
         * rpcsec_gss, so we know that the only buffers we will ever encrypt
         * already begin with a unique sequence number.  Just to hedge my bets
         * I'll make a half-hearted attempt at something unique, but ensuring
         * uniqueness would mean worrying about atomicity and rollover, and I
         * don't care enough. */

        /* initialize to random value */
        if (i == 0) {
                i = random32();
                i = (i << 32) | random32();
        }

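        /*
         * The confounder must fill exactly one cipher block, so conflen is
         * the blocksize of the encryption type in use: 8 bytes for the DES
         * ciphers, 16 for enctypes with a 16-byte block.  Any other size is
         * a caller bug.
         */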
        switch (conflen) {
        case 16:
                *q++ = i++;
                /* fall through */
        case 8:
                *q++ = i++;
                break;
        default:
                BUG();
        }
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
                struct xdr_buf *buf, struct page **pages)
{
        struct krb5_ctx *kctx = ctx->internal_ctx_id;
        char cksumdata[16];
        struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
        int blocksize = 0, plainlen;
        unsigned char *ptr, *msg_start;
        s32 now;
        int headlen;
        struct page **tmp_pages;
        u32 seq_send;

        dprintk("RPC: gss_wrap_kerberos\n");

        now = get_seconds();

        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = blocksize + buf->len - offset;

        headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
                                                (buf->len - offset);
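        /*
         * plainlen is what actually gets encrypted: one blocksize worth of
         * confounder followed by the (already padded) data.  headlen is the
         * extra room needed at the front of the buffer for the token: the
         * generic token framing from g_token_size() around the 16-byte krb5
         * header, the 8-byte checksum and the plaintext, minus the data
         * that is already sitting there.
         */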

        ptr = buf->head[0].iov_base + offset;
        /* shift data to make room for header. */
        xdr_extend_head(buf, offset, headlen);

        /* XXX Would be cleverer to encrypt while copying. */
        BUG_ON((buf->len - offset - headlen) % blocksize);

        g_make_token_header(&kctx->mech_used,
                                GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);

        /* ptr now at header described in rfc 1964, section 1.2.1: */
        ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
        ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

        msg_start = ptr + 24;

        *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
        memset(ptr + 4, 0xff, 4);
        *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);
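
        /*
         * Layout of the rfc 1964 wrap token being built here:
         *
         *   bytes  0- 1  TOK_ID    (KG_TOK_WRAP_MSG)
         *   bytes  2- 3  SGN_ALG   (DES MAC MD5)
         *   bytes  4- 5  SEAL_ALG  (DES)
         *   bytes  6- 7  filler    (0xff 0xff)
         *   bytes  8-15  SND_SEQ   (encrypted sequence number, filled in below)
         *   bytes 16-23  SGN_CKSUM (encrypted checksum, filled in below)
         *   bytes 24-    confounder followed by the padded plaintext
         */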

        make_confounder(msg_start, blocksize);

        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
        if (make_checksum("md5", ptr, 8, buf,
                                offset + headlen - blocksize, &md5cksum))
                return GSS_S_FAILURE;
        buf->pages = tmp_pages;

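        /*
         * SGN_ALG_DES_MAC_MD5: the MD5 digest just computed over the first
         * 8 header bytes plus the plaintext is DES-encrypted in place with
         * kctx->seq, and the final 8 bytes of the ciphertext become the
         * token's SGN_CKSUM field.
         */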
        if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
                          md5cksum.data, md5cksum.len))
                return GSS_S_FAILURE;
        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);

        spin_lock(&krb5_seq_lock);
        seq_send = kctx->seq_send++;
        spin_unlock(&krb5_seq_lock);

        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
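        /* Per rfc 1964 the sequence number travels encrypted: the 4-byte
         * sequence number plus four copies of the direction byte (0 for the
         * initiator, 0xff for the acceptor) are DES-CBC encrypted using the
         * just-computed checksum as the IV, and the result lands in the
         * SND_SEQ field at ptr + 8. */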
        if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
                return GSS_S_FAILURE;

        if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
                                                                pages))
                return GSS_S_FAILURE;

        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
        struct krb5_ctx *kctx = ctx->internal_ctx_id;
        int signalg;
        int sealalg;
        char cksumdata[16];
        struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
        s32 now;
        int direction;
        s32 seqnum;
        unsigned char *ptr;
        int bodysize;
        void *data_start, *orig_start;
        int data_len;
        int blocksize;

        dprintk("RPC: gss_unwrap_kerberos\n");

        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
                                        buf->len - offset))
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
            (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
                return GSS_S_DEFECTIVE_TOKEN;

        /* XXX sanity-check bodysize?? */

        /* get the sign and seal algorithms */

        signalg = ptr[2] + (ptr[3] << 8);
        if (signalg != SGN_ALG_DES_MAC_MD5)
                return GSS_S_DEFECTIVE_TOKEN;

        sealalg = ptr[4] + (ptr[5] << 8);
        if (sealalg != SEAL_ALG_DES)
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
                return GSS_S_DEFECTIVE_TOKEN;

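        /*
         * Decrypt in place.  The ciphertext begins right after the 16-byte
         * krb5 header and the 8-byte checksum, i.e. at the confounder, so
         * that is the offset (relative to head[0]) handed to
         * gss_decrypt_xdr_buf() and make_checksum().
         */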
        if (gss_decrypt_xdr_buf(kctx->enc, buf,
                        ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
                return GSS_S_DEFECTIVE_TOKEN;

        if (make_checksum("md5", ptr, 8, buf,
                        ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
                return GSS_S_FAILURE;

        if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
                           md5cksum.data, md5cksum.len))
                return GSS_S_FAILURE;

        if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
                return GSS_S_BAD_SIG;

        /* it got through unscathed.  Make sure the context is unexpired */

        now = get_seconds();

        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /* do sequencing checks */

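        /* The direction byte recovered from SND_SEQ identifies the sender:
         * 0 means the context initiator, 0xff the acceptor, so a token we
         * receive must carry the opposite role from our own. */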
        if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
                             &direction, &seqnum))
                return GSS_S_BAD_SIG;

        if ((kctx->initiate && direction != 0xff) ||
            (!kctx->initiate && direction != 0))
                return GSS_S_BAD_SIG;

        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */

        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
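        /* data_start skips the 16-byte krb5 header, the 8-byte checksum and
         * one blocksize of confounder; everything from there to the end of
         * head[0] is the caller's plaintext and gets shifted back to where
         * the token began. */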
        orig_start = buf->head[0].iov_base + offset;
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
        buf->head[0].iov_len -= (data_start - orig_start);
        buf->len -= (data_start - orig_start);

        if (gss_krb5_remove_padding(buf, blocksize))
                return GSS_S_DEFECTIVE_TOKEN;

        return GSS_S_COMPLETE;
}