| 1 | /* $Id: cryptocop.c,v 1.13 2005/04/21 17:27:55 henriken Exp $ |
| 2 | * |
| 3 | * Stream co-processor driver for the ETRAX FS |
| 4 | * |
| 5 | * Copyright (C) 2003-2005 Axis Communications AB |
| 6 | */ |
| 7 | |
| 8 | #include <linux/init.h> |
| 9 | #include <linux/sched.h> |
| 10 | #include <linux/module.h> |
| 11 | #include <linux/slab.h> |
| 12 | #include <linux/string.h> |
| 13 | #include <linux/fs.h> |
| 14 | #include <linux/mm.h> |
| 15 | #include <linux/spinlock.h> |
| 16 | #include <linux/stddef.h> |
| 17 | |
| 18 | #include <asm/uaccess.h> |
| 19 | #include <asm/io.h> |
| 20 | #include <asm/atomic.h> |
| 21 | |
| 22 | #include <linux/list.h> |
| 23 | #include <linux/interrupt.h> |
| 24 | |
| 25 | #include <asm/signal.h> |
| 26 | #include <asm/irq.h> |
| 27 | |
| 28 | #include <asm/arch/dma.h> |
| 29 | #include <asm/arch/hwregs/dma.h> |
| 30 | #include <asm/arch/hwregs/reg_map.h> |
| 31 | #include <asm/arch/hwregs/reg_rdwr.h> |
| 32 | #include <asm/arch/hwregs/intr_vect_defs.h> |
| 33 | |
| 34 | #include <asm/arch/hwregs/strcop.h> |
| 35 | #include <asm/arch/hwregs/strcop_defs.h> |
| 36 | #include <asm/arch/cryptocop.h> |
| 37 | |
| 38 | |
| 39 | |
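| | /* Padding added to descriptor allocations so that a 32-byte aligned |
| | * dma_descr_data can always be carved out of the allocation (see |
| | * alloc_cdesc()). |
| | */ |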
| 40 | #define DESCR_ALLOC_PAD (31) |
| 41 | |
| 42 | struct cryptocop_dma_desc { |
| 43 | char *free_buf; /* If non-null will be kfreed in free_cdesc() */ |
| 44 | dma_descr_data *dma_descr; |
| 45 | |
| 46 | unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD]; |
| 47 | |
| 48 | unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */ |
| 49 | struct cryptocop_dma_desc *next; |
| 50 | }; |
| 51 | |
| 52 | |
| 53 | struct cryptocop_int_operation{ |
| 54 | void *alloc_ptr; |
| 55 | cryptocop_session_id sid; |
| 56 | |
| 57 | dma_descr_context ctx_out; |
| 58 | dma_descr_context ctx_in; |
| 59 | |
| 60 | /* DMA descriptors allocated by driver. */ |
| 61 | struct cryptocop_dma_desc *cdesc_out; |
| 62 | struct cryptocop_dma_desc *cdesc_in; |
| 63 | |
| 64 | /* Strcop config to use. */ |
| 65 | cryptocop_3des_mode tdes_mode; |
| 66 | cryptocop_csum_type csum_mode; |
| 67 | |
| 68 | /* DMA descrs provided by consumer. */ |
| 69 | dma_descr_data *ddesc_out; |
| 70 | dma_descr_data *ddesc_in; |
| 71 | }; |
| 72 | |
| 73 | |
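| | /* Per-transform bookkeeping used while the DMA lists are built: tracks which |
| | * unit the transform sources from, how many bytes it has consumed and produced, |
| | * and the metadata values to program into the DMA out descriptors. |
| | */ |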
| 74 | struct cryptocop_tfrm_ctx { |
| 75 | cryptocop_tfrm_id tid; |
| 76 | unsigned int blocklength; |
| 77 | |
| 78 | unsigned int start_ix; |
| 79 | |
| 80 | struct cryptocop_tfrm_cfg *tcfg; |
| 81 | struct cryptocop_transform_ctx *tctx; |
| 82 | |
| 83 | unsigned char previous_src; |
| 84 | unsigned char current_src; |
| 85 | |
| 86 | /* Values to use in metadata out. */ |
| 87 | unsigned char hash_conf; |
| 88 | unsigned char hash_mode; |
| 89 | unsigned char ciph_conf; |
| 90 | unsigned char cbcmode; |
| 91 | unsigned char decrypt; |
| 92 | |
| 93 | unsigned int requires_padding:1; |
| 94 | unsigned int strict_block_length:1; |
| 95 | unsigned int active:1; |
| 96 | unsigned int done:1; |
| 97 | size_t consumed; |
| 98 | size_t produced; |
| 99 | |
| 100 | /* Pad (input) descriptors to put in the DMA out list when the transform |
| 101 | * output is put on the DMA in list. */ |
| 102 | struct cryptocop_dma_desc *pad_descs; |
| 103 | |
| 104 | struct cryptocop_tfrm_ctx *prev_src; |
| 105 | struct cryptocop_tfrm_ctx *curr_src; |
| 106 | |
| 107 | /* Mapping to HW. */ |
| 108 | unsigned char unit_no; |
| 109 | }; |
| 110 | |
| 111 | |
| 112 | struct cryptocop_private{ |
| 113 | cryptocop_session_id sid; |
| 114 | struct cryptocop_private *next; |
| 115 | }; |
| 116 | |
| 117 | /* Session list. */ |
| 118 | |
| 119 | struct cryptocop_transform_ctx{ |
| 120 | struct cryptocop_transform_init init; |
| 121 | unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH]; |
| 122 | unsigned int dec_key_set:1; |
| 123 | |
| 124 | struct cryptocop_transform_ctx *next; |
| 125 | }; |
| 126 | |
| 127 | |
| 128 | struct cryptocop_session{ |
| 129 | cryptocop_session_id sid; |
| 130 | |
| 131 | struct cryptocop_transform_ctx *tfrm_ctx; |
| 132 | |
| 133 | struct cryptocop_session *next; |
| 134 | }; |
| 135 | |
| 136 | /* Priority levels for jobs sent to the cryptocop. Checksum operations from |
| 137 | the kernel have the highest priority since TCP/IP stack processing must not |
| 138 | become a bottleneck. */ |
| 139 | typedef enum { |
| 140 | cryptocop_prio_kernel_csum = 0, |
| 141 | cryptocop_prio_kernel = 1, |
| 142 | cryptocop_prio_user = 2, |
| 143 | cryptocop_prio_no_prios = 3 |
| 144 | } cryptocop_queue_priority; |
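| | /* Note that cryptocop_prio_no_prios is not a real priority level; it only gives |
| | * the number of queues in cryptocop_job_queues[]. |
| | */ |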
| 145 | |
| 146 | struct cryptocop_prio_queue{ |
| 147 | struct list_head jobs; |
| 148 | cryptocop_queue_priority prio; |
| 149 | }; |
| 150 | |
| 151 | struct cryptocop_prio_job{ |
| 152 | struct list_head node; |
| 153 | cryptocop_queue_priority prio; |
| 154 | |
| 155 | struct cryptocop_operation *oper; |
| 156 | struct cryptocop_int_operation *iop; |
| 157 | }; |
| 158 | |
| 159 | struct ioctl_job_cb_ctx { |
| 160 | unsigned int processed:1; |
| 161 | }; |
| 162 | |
| 163 | |
| 164 | static struct cryptocop_session *cryptocop_sessions = NULL; |
| 165 | spinlock_t cryptocop_sessions_lock; |
| 166 | |
| 167 | /* Next Session ID to assign. */ |
| 168 | static cryptocop_session_id next_sid = 1; |
| 169 | |
| 170 | /* Pad for checksum. */ |
| 171 | static const char csum_zero_pad[1] = {0x00}; |
| 172 | |
| 173 | /* Trash buffer for mem2mem operations. */ |
| 174 | #define MEM2MEM_DISCARD_BUF_LENGTH (512) |
| 175 | static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH]; |
| 176 | |
| 177 | /* Descriptor pool. */ |
| 178 | /* FIXME Tweak this value. */ |
| 179 | #define CRYPTOCOP_DESCRIPTOR_POOL_SIZE (100) |
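| | /* The static pool backs descriptor allocation in atomic context: alloc_cdesc() |
| | * draws from it when called with GFP_ATOMIC and falls back to kmalloc() otherwise. |
| | */ |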
| 180 | static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE]; |
| 181 | static struct cryptocop_dma_desc *descr_pool_free_list; |
| 182 | static int descr_pool_no_free; |
| 183 | static spinlock_t descr_pool_lock; |
| 184 | |
| 185 | /* Lock that prevents the cryptocop from starting to process a new operation. The |
| 186 | holder of this lock MUST call cryptocop_start_job() after releasing it. */ |
| 187 | spinlock_t cryptocop_process_lock; |
| 188 | |
| 189 | static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios]; |
| 190 | static spinlock_t cryptocop_job_queue_lock; |
| 191 | static struct cryptocop_prio_job *cryptocop_running_job = NULL; |
| 192 | static spinlock_t running_job_lock; |
| 193 | |
| 194 | /* The interrupt handler appends completed jobs to this list. The scheduled |
| 195 | * tasklet removes them upon sending the response to the crypto consumer. */ |
| 196 | static struct list_head cryptocop_completed_jobs; |
| 197 | static spinlock_t cryptocop_completed_jobs_lock; |
| 198 | |
| 199 | DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq); |
| 200 | |
| 201 | |
| 202 | /** Local functions. **/ |
| 203 | |
| 204 | static int cryptocop_open(struct inode *, struct file *); |
| 205 | |
| 206 | static int cryptocop_release(struct inode *, struct file *); |
| 207 | |
| 208 | static int cryptocop_ioctl(struct inode *inode, struct file *file, |
| 209 | unsigned int cmd, unsigned long arg); |
| 210 | |
| 211 | static void cryptocop_start_job(void); |
| 212 | |
| 213 | static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation); |
| 214 | static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation); |
| 215 | |
| 216 | static int cryptocop_job_queue_init(void); |
| 217 | static void cryptocop_job_queue_close(void); |
| 218 | |
| 219 | static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length); |
| 220 | |
| 221 | static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length); |
| 222 | |
| 223 | static int transform_ok(struct cryptocop_transform_init *tinit); |
| 224 | |
| 225 | static struct cryptocop_session *get_session(cryptocop_session_id sid); |
| 226 | |
| 227 | static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid); |
| 228 | |
| 229 | static void delete_internal_operation(struct cryptocop_int_operation *iop); |
| 230 | |
| 231 | static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength); |
| 232 | |
| 233 | static int init_stream_coprocessor(void); |
| 234 | |
| 235 | static void __exit exit_stream_coprocessor(void); |
| 236 | |
| 237 | /*#define LDEBUG*/ |
| 238 | #ifdef LDEBUG |
| 239 | #define DEBUG(s) s |
| 240 | #define DEBUG_API(s) s |
| 241 | static void print_cryptocop_operation(struct cryptocop_operation *cop); |
| 242 | static void print_dma_descriptors(struct cryptocop_int_operation *iop); |
| 243 | static void print_strcop_crypto_op(struct strcop_crypto_op *cop); |
| 244 | static void print_lock_status(void); |
| 245 | static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op); |
| 246 | #define assert(s) do{if (!(s)) panic(#s);} while(0) |
| 247 | #else |
| 248 | #define DEBUG(s) |
| 249 | #define DEBUG_API(s) |
| 250 | #define assert(s) |
| 251 | #endif |
| 252 | |
| 253 | |
| 254 | /* Transform constants. */ |
| 255 | #define DES_BLOCK_LENGTH (8) |
| 256 | #define AES_BLOCK_LENGTH (16) |
| 257 | #define MD5_BLOCK_LENGTH (64) |
| 258 | #define SHA1_BLOCK_LENGTH (64) |
| 259 | #define CSUM_BLOCK_LENGTH (2) |
| 260 | #define MD5_STATE_LENGTH (16) |
| 261 | #define SHA1_STATE_LENGTH (20) |
| 262 | |
| 263 | /* The device number. */ |
| 264 | #define CRYPTOCOP_MAJOR (254) |
| 265 | #define CRYPTOCOP_MINOR (0) |
| 266 | |
| 267 | |
| 268 | |
| 269 | struct file_operations cryptocop_fops = { |
| 270 | .owner = THIS_MODULE, |
| 271 | .open = cryptocop_open, |
| 272 | .release = cryptocop_release, |
| 273 | .ioctl = cryptocop_ioctl |
| 274 | }; |
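| | /* These fops are expected to be registered as a character device under |
| | * CRYPTOCOP_MAJOR/CRYPTOCOP_MINOR by init_stream_coprocessor(), e.g. via |
| | * register_chrdev(); the registration itself happens outside this excerpt. |
| | */ |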
| 275 | |
| 276 | |
| 277 | static void free_cdesc(struct cryptocop_dma_desc *cdesc) |
| 278 | { |
| 279 | DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool)); |
| 280 | kfree(cdesc->free_buf); |
| 281 | |
| 282 | if (cdesc->from_pool) { |
| 283 | unsigned long int flags; |
| 284 | spin_lock_irqsave(&descr_pool_lock, flags); |
| 285 | cdesc->next = descr_pool_free_list; |
| 286 | descr_pool_free_list = cdesc; |
| 287 | ++descr_pool_no_free; |
| 288 | spin_unlock_irqrestore(&descr_pool_lock, flags); |
| 289 | } else { |
| 290 | kfree(cdesc); |
| 291 | } |
| 292 | } |
| 293 | |
| 294 | |
| 295 | static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag) |
| 296 | { |
| 297 | int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0; |
| 298 | struct cryptocop_dma_desc *cdesc; |
| 299 | |
| 300 | if (use_pool) { |
| 301 | unsigned long int flags; |
| 302 | spin_lock_irqsave(&descr_pool_lock, flags); |
| 303 | if (!descr_pool_free_list) { |
| 304 | spin_unlock_irqrestore(&descr_pool_lock, flags); |
| 305 | DEBUG_API(printk("alloc_cdesc: pool is empty\n")); |
| 306 | return NULL; |
| 307 | } |
| 308 | cdesc = descr_pool_free_list; |
| 309 | descr_pool_free_list = descr_pool_free_list->next; |
| 310 | --descr_pool_no_free; |
| 311 | spin_unlock_irqrestore(&descr_pool_lock, flags); |
| 312 | cdesc->from_pool = 1; |
| 313 | } else { |
| 314 | cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag); |
| 315 | if (!cdesc) { |
| 316 | DEBUG_API(printk("alloc_cdesc: kmalloc\n")); |
| 317 | return NULL; |
| 318 | } |
| 319 | cdesc->from_pool = 0; |
| 320 | } |
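| | /* Carve a 32-byte aligned dma_descr_data out of the embedded dma_descr_buf: |
| | * advance DESCR_ALLOC_PAD bytes past the start of the buffer and mask off the |
| | * low five address bits to round down to a 32-byte boundary. |
| | */ |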
| 321 | cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F); |
| 322 | |
| 323 | cdesc->next = NULL; |
| 324 | |
| 325 | cdesc->free_buf = NULL; |
| 326 | cdesc->dma_descr->out_eop = 0; |
| 327 | cdesc->dma_descr->in_eop = 0; |
| 328 | cdesc->dma_descr->intr = 0; |
| 329 | cdesc->dma_descr->eol = 0; |
| 330 | cdesc->dma_descr->wait = 0; |
| 331 | cdesc->dma_descr->buf = NULL; |
| 332 | cdesc->dma_descr->after = NULL; |
| 333 | |
| 334 | DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool)); |
| 335 | return cdesc; |
| 336 | } |
| 337 | |
| 338 | |
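| | /* Link the descriptors of a chain for the hardware: dma_descr->next must hold |
| | * the physical address of the next descriptor, since the DMA engine does not |
| | * use virtual addresses. |
| | */ |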
| 339 | static void setup_descr_chain(struct cryptocop_dma_desc *cd) |
| 340 | { |
| 341 | DEBUG(printk("setup_descr_chain: entering\n")); |
| 342 | while (cd) { |
| 343 | if (cd->next) { |
| 344 | cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr); |
| 345 | } else { |
| 346 | cd->dma_descr->next = NULL; |
| 347 | } |
| 348 | cd = cd->next; |
| 349 | } |
| 350 | DEBUG(printk("setup_descr_chain: exit\n")); |
| 351 | } |
| 352 | |
| 353 | |
| 354 | /* Create a pad descriptor for the transform. |
| 355 | * Return -1 for error, 0 if pad created. */ |
| 356 | static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag) |
| 357 | { |
| 358 | struct cryptocop_dma_desc *cdesc = NULL; |
| 359 | int error = 0; |
| 360 | struct strcop_meta_out mo = { |
| 361 | .ciphsel = src_none, |
| 362 | .hashsel = src_none, |
| 363 | .csumsel = src_none |
| 364 | }; |
| 365 | char *pad; |
| 366 | size_t plen; |
| 367 | |
| 368 | DEBUG(printk("create_pad_descriptor: start.\n")); |
| 369 | /* Setup pad descriptor. */ |
| 370 | |
| 371 | DEBUG(printk("create_pad_descriptor: setting up padding.\n")); |
| 372 | cdesc = alloc_cdesc(alloc_flag); |
| 373 | if (!cdesc){ |
| 374 | DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n")); |
| 375 | goto error_cleanup; |
| 376 | } |
| 377 | switch (tc->unit_no) { |
| 378 | case src_md5: |
| 379 | error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen); |
| 380 | if (error){ |
| 381 | DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n")); |
| 382 | goto error_cleanup; |
| 383 | } |
| 384 | cdesc->free_buf = pad; |
| 385 | mo.hashsel = src_dma; |
| 386 | mo.hashconf = tc->hash_conf; |
| 387 | mo.hashmode = tc->hash_mode; |
| 388 | break; |
| 389 | case src_sha1: |
| 390 | error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen); |
| 391 | if (error){ |
| 392 | DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n")); |
| 393 | goto error_cleanup; |
| 394 | } |
| 395 | cdesc->free_buf = pad; |
| 396 | mo.hashsel = src_dma; |
| 397 | mo.hashconf = tc->hash_conf; |
| 398 | mo.hashmode = tc->hash_mode; |
| 399 | break; |
| 400 | case src_csum: |
| 401 | if (tc->consumed % tc->blocklength){ |
| 402 | pad = (char*)csum_zero_pad; |
| 403 | plen = 1; |
| 404 | } else { |
| 405 | pad = (char*)cdesc; /* Use any pointer. */ |
| 406 | plen = 0; |
| 407 | } |
| 408 | mo.csumsel = src_dma; |
| 409 | break; |
| 410 | } |
| 411 | cdesc->dma_descr->wait = 1; |
| 412 | cdesc->dma_descr->out_eop = 1; /* Since this is a pad, output is pushed. EOP is OK here since the padded unit is the only one active. */ |
| 413 | cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad); |
| 414 | cdesc->dma_descr->after = cdesc->dma_descr->buf + plen; |
| 415 | |
| 416 | cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo); |
| 417 | *pad_desc = cdesc; |
| 418 | |
| 419 | return 0; |
| 420 | |
| 421 | error_cleanup: |
| 422 | if (cdesc) free_cdesc(cdesc); |
| 423 | return -1; |
| 424 | } |
| 425 | |
| 426 | |
| 427 | static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag) |
| 428 | { |
| 429 | struct cryptocop_dma_desc *key_desc = alloc_cdesc(alloc_flag); |
| 430 | struct strcop_meta_out mo = {0}; |
| 431 | |
| 432 | DEBUG(printk("setup_key_dl_desc\n")); |
| 433 | |
| 434 | if (!key_desc) { |
| 435 | DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n")); |
| 436 | return -ENOMEM; |
| 437 | } |
| 438 | |
| 439 | /* Download key. */ |
| 440 | if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) { |
| 441 | /* Precook the AES decrypt key. */ |
| 442 | if (!tc->tctx->dec_key_set){ |
| 443 | get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen); |
| 444 | tc->tctx->dec_key_set = 1; |
| 445 | } |
| 446 | key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key); |
| 447 | key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8; |
| 448 | } else { |
| 449 | key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key); |
| 450 | key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8; |
| 451 | } |
| 452 | /* Setup metadata. */ |
| 453 | mo.dlkey = 1; |
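| | /* For a key download the key length is encoded in the (decrypt, hashmode) |
| | * metadata bit pair: 64 -> 0/0, 128 -> 0/1, 192 -> 1/0, 256 -> 1/1. |
| | */ |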
| 454 | switch (tc->tctx->init.keylen) { |
| 455 | case 64: |
| 456 | mo.decrypt = 0; |
| 457 | mo.hashmode = 0; |
| 458 | break; |
| 459 | case 128: |
| 460 | mo.decrypt = 0; |
| 461 | mo.hashmode = 1; |
| 462 | break; |
| 463 | case 192: |
| 464 | mo.decrypt = 1; |
| 465 | mo.hashmode = 0; |
| 466 | break; |
| 467 | case 256: |
| 468 | mo.decrypt = 1; |
| 469 | mo.hashmode = 1; |
| 470 | break; |
| 471 | default: |
| 472 | break; |
| 473 | } |
| 474 | mo.ciphsel = mo.hashsel = mo.csumsel = src_none; |
| 475 | key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo); |
| 476 | |
| 477 | key_desc->dma_descr->out_eop = 1; |
| 478 | key_desc->dma_descr->wait = 1; |
| 479 | key_desc->dma_descr->intr = 0; |
| 480 | |
| 481 | *kd = key_desc; |
| 482 | return 0; |
| 483 | } |
| 484 | |
| 485 | static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag) |
| 486 | { |
| 487 | struct cryptocop_dma_desc *iv_desc = alloc_cdesc(alloc_flag); |
| 488 | struct strcop_meta_out mo = {0}; |
| 489 | |
| 490 | DEBUG(printk("setup_cipher_iv_desc\n")); |
| 491 | |
| 492 | if (!iv_desc) { |
| 493 | DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n")); |
| 494 | return -ENOMEM; |
| 495 | } |
| 496 | /* Download IV. */ |
| 497 | iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv); |
| 498 | iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength; |
| 499 | |
| 500 | /* Setup metadata. */ |
| 501 | mo.hashsel = mo.csumsel = src_none; |
| 502 | mo.ciphsel = src_dma; |
| 503 | mo.ciphconf = tc->ciph_conf; |
| 504 | mo.cbcmode = tc->cbcmode; |
| 505 | |
| 506 | iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo); |
| 507 | |
| 508 | iv_desc->dma_descr->out_eop = 0; |
| 509 | iv_desc->dma_descr->wait = 1; |
| 510 | iv_desc->dma_descr->intr = 0; |
| 511 | |
| 512 | *id = iv_desc; |
| 513 | return 0; |
| 514 | } |
| 515 | |
| 516 | /* Map the output length of the transform to the operation output, starting at the inject index. */ |
| 517 | static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag) |
| 518 | { |
| 519 | int err = 0; |
| 520 | struct cryptocop_dma_desc head = {0}; |
| 521 | struct cryptocop_dma_desc *outdesc = &head; |
| 522 | size_t iov_offset = 0; |
| 523 | size_t out_ix = 0; |
| 524 | int outiov_ix = 0; |
| 525 | struct strcop_meta_in mi = {0}; |
| 526 | |
| 527 | size_t out_length = tc->produced; |
| 528 | int rem_length; |
| 529 | int dlength; |
| 530 | |
| 531 | assert(out_length != 0); |
| 532 | if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) { |
| 533 | DEBUG_API(printk("create_input_descriptors: operation outdata too small\n")); |
| 534 | return -EINVAL; |
| 535 | } |
| 536 | /* Traverse the out iovec until the result inject index is reached. */ |
| 537 | while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){ |
| 538 | out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len; |
| 539 | outiov_ix++; |
| 540 | } |
| 541 | if (outiov_ix >= operation->tfrm_op.outcount){ |
| 542 | DEBUG_API(printk("create_input_descriptors: operation outdata too small\n")); |
| 543 | return -EINVAL; |
| 544 | } |
| 545 | iov_offset = tc->tcfg->inject_ix - out_ix; |
| 546 | mi.dmasel = tc->unit_no; |
| 547 | |
| 548 | /* Setup the output descriptors. */ |
| 549 | while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) { |
| 550 | outdesc->next = alloc_cdesc(alloc_flag); |
| 551 | if (!outdesc->next) { |
| 552 | DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n")); |
| 553 | err = -ENOMEM; |
| 554 | goto error_cleanup; |
| 555 | } |
| 556 | outdesc = outdesc->next; |
| 557 | rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset; |
| 558 | dlength = (out_length < rem_length) ? out_length : rem_length; |
| 559 | |
| 560 | DEBUG(printk("create_input_descriptors:\n" |
| 561 | "outiov_ix=%d, rem_length=%d, dlength=%d\n" |
| 562 | "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n" |
| 563 | "outcount=%d, outiov_ix=%d\n", |
| 564 | outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix)); |
| 565 | |
| 566 | outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset); |
| 567 | outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength; |
| 568 | outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); |
| 569 | |
| 570 | out_length -= dlength; |
| 571 | iov_offset += dlength; |
| 572 | if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) { |
| 573 | iov_offset = 0; |
| 574 | ++outiov_ix; |
| 575 | } |
| 576 | } |
| 577 | if (out_length > 0){ |
| 578 | DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length)); |
| 579 | err = -EINVAL; |
| 580 | goto error_cleanup; |
| 581 | } |
| 582 | /* Set sync in last descriptor. */ |
| 583 | mi.sync = 1; |
| 584 | outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); |
| 585 | |
| 586 | *id = head.next; |
| 587 | return 0; |
| 588 | |
| 589 | error_cleanup: |
| 590 | while (head.next) { |
| 591 | outdesc = head.next->next; |
| 592 | free_cdesc(head.next); |
| 593 | head.next = outdesc; |
| 594 | } |
| 595 | return err; |
| 596 | } |
| 597 | |
| 598 | |
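| | /* Despite the names, create_output_descriptors() below builds DMA *out* |
| | * descriptors that feed operation indata to the co-processor, while |
| | * create_input_descriptors() above builds DMA *in* descriptors that deliver |
| | * unit output into the operation outdata iovec at the inject index. |
| | */ |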
| 599 | static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag) |
| 600 | { |
| 601 | while (desc_len != 0) { |
| 602 | struct cryptocop_dma_desc *cdesc; |
| 603 | int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset; |
| 604 | int dlength = (desc_len < rem_length) ? desc_len : rem_length; |
| 605 | |
| 606 | cdesc = alloc_cdesc(alloc_flag); |
| 607 | if (!cdesc) { |
| 608 | DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n")); |
| 609 | return -ENOMEM; |
| 610 | } |
| 611 | (*current_out_cdesc)->next = cdesc; |
| 612 | (*current_out_cdesc) = cdesc; |
| 613 | |
| 614 | cdesc->free_buf = NULL; |
| 615 | |
| 616 | cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset); |
| 617 | cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength; |
| 618 | |
| 619 | desc_len -= dlength; |
| 620 | *iniov_offset += dlength; |
| 621 | assert(desc_len >= 0); |
| 622 | if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) { |
| 623 | *iniov_offset = 0; |
| 624 | ++(*iniov_ix); |
| 625 | if (*iniov_ix > operation->tfrm_op.incount) { |
| 626 | DEBUG_API(printk("create_output_descriptors: not enough indata in operation.")); |
| 627 | return -EINVAL; |
| 628 | } |
| 629 | } |
| 630 | cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out)); |
| 631 | } /* while (desc_len != 0) */ |
| 632 | /* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */ |
| 633 | (*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */ |
| 634 | |
| 635 | return 0; |
| 636 | } |
| 637 | |
| 638 | |
| 639 | static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag) |
| 640 | { |
| 641 | DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no)); |
| 642 | if (tc->tcfg) { |
| 643 | int failed = 0; |
| 644 | struct cryptocop_dma_desc *idescs = NULL; |
| 645 | DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced)); |
| 646 | if (tc->pad_descs) { |
| 647 | DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n")); |
| 648 | while (tc->pad_descs) { |
| 649 | DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs)); |
| 650 | (*current_out_cdesc)->next = tc->pad_descs; |
| 651 | tc->pad_descs = tc->pad_descs->next; |
| 652 | (*current_out_cdesc) = (*current_out_cdesc)->next; |
| 653 | } |
| 654 | } |
| 655 | |
| 656 | /* Setup and append output descriptors to DMA in list. */ |
| 657 | if (tc->unit_no == src_dma){ |
| 658 | /* mem2mem. Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */ |
| 659 | struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma}; |
| 660 | unsigned int start_ix = tc->start_ix; |
| 661 | while (start_ix){ |
| 662 | unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH; |
| 663 | (*current_in_cdesc)->next = alloc_cdesc(alloc_flag); |
| 664 | if (!(*current_in_cdesc)->next){ |
| 665 | DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n")); |
| 666 | return -ENOMEM; |
| 667 | } |
| 668 | (*current_in_cdesc) = (*current_in_cdesc)->next; |
| 669 | (*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf); |
| 670 | (*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen; |
| 671 | (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); |
| 672 | start_ix -= desclen; |
| 673 | } |
| 674 | mi.sync = 1; |
| 675 | (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); |
| 676 | } |
| 677 | |
| 678 | failed = create_input_descriptors(operation, tc, &idescs, alloc_flag); |
| 679 | if (failed){ |
| 680 | DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n")); |
| 681 | return failed; |
| 682 | } |
| 683 | DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n")); |
| 684 | while (idescs) { |
| 685 | DEBUG(printk("append descriptor 0x%p\n", idescs)); |
| 686 | (*current_in_cdesc)->next = idescs; |
| 687 | idescs = idescs->next; |
| 688 | (*current_in_cdesc) = (*current_in_cdesc)->next; |
| 689 | } |
| 690 | } |
| 691 | return 0; |
| 692 | } |
| 693 | |
| 694 | |
| 695 | |
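| | /* Build the DMA out (indata) and DMA in (outdata) descriptor lists for an |
| | * operation. Rough flow: validate the configured transforms, optionally emit |
| | * key and CBC IV download descriptors, then walk the operation descriptors |
| | * emitting out descriptors for the indata, pad descriptors for digests and |
| | * checksums, and in descriptors that inject the results into outdata. Finally |
| | * the EOL/INTR bits and the DMA contexts are set up. |
| | */ |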
| 696 | static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag) |
| 697 | { |
| 698 | struct cryptocop_session *sess; |
| 699 | struct cryptocop_transform_ctx *tctx; |
| 700 | |
| 701 | struct cryptocop_tfrm_ctx digest_ctx = { |
| 702 | .previous_src = src_none, |
| 703 | .current_src = src_none, |
| 704 | .start_ix = 0, |
| 705 | .requires_padding = 1, |
| 706 | .strict_block_length = 0, |
| 707 | .hash_conf = 0, |
| 708 | .hash_mode = 0, |
| 709 | .ciph_conf = 0, |
| 710 | .cbcmode = 0, |
| 711 | .decrypt = 0, |
| 712 | .consumed = 0, |
| 713 | .produced = 0, |
| 714 | .pad_descs = NULL, |
| 715 | .active = 0, |
| 716 | .done = 0, |
| 717 | .prev_src = NULL, |
| 718 | .curr_src = NULL, |
| 719 | .tcfg = NULL}; |
| 720 | struct cryptocop_tfrm_ctx cipher_ctx = { |
| 721 | .previous_src = src_none, |
| 722 | .current_src = src_none, |
| 723 | .start_ix = 0, |
| 724 | .requires_padding = 0, |
| 725 | .strict_block_length = 1, |
| 726 | .hash_conf = 0, |
| 727 | .hash_mode = 0, |
| 728 | .ciph_conf = 0, |
| 729 | .cbcmode = 0, |
| 730 | .decrypt = 0, |
| 731 | .consumed = 0, |
| 732 | .produced = 0, |
| 733 | .pad_descs = NULL, |
| 734 | .active = 0, |
| 735 | .done = 0, |
| 736 | .prev_src = NULL, |
| 737 | .curr_src = NULL, |
| 738 | .tcfg = NULL}; |
| 739 | struct cryptocop_tfrm_ctx csum_ctx = { |
| 740 | .previous_src = src_none, |
| 741 | .current_src = src_none, |
| 742 | .start_ix = 0, |
| 743 | .blocklength = 2, |
| 744 | .requires_padding = 1, |
| 745 | .strict_block_length = 0, |
| 746 | .hash_conf = 0, |
| 747 | .hash_mode = 0, |
| 748 | .ciph_conf = 0, |
| 749 | .cbcmode = 0, |
| 750 | .decrypt = 0, |
| 751 | .consumed = 0, |
| 752 | .produced = 0, |
| 753 | .pad_descs = NULL, |
| 754 | .active = 0, |
| 755 | .done = 0, |
| 756 | .tcfg = NULL, |
| 757 | .prev_src = NULL, |
| 758 | .curr_src = NULL, |
| 759 | .unit_no = src_csum}; |
| 760 | struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg; |
| 761 | |
| 762 | unsigned int indata_ix = 0; |
| 763 | |
| 764 | /* iovec accounting. */ |
| 765 | int iniov_ix = 0; |
| 766 | int iniov_offset = 0; |
| 767 | |
| 768 | /* Operation descriptor cfg traversal pointer. */ |
| 769 | struct cryptocop_desc *odsc; |
| 770 | |
| 771 | int failed = 0; |
| 772 | /* List heads for allocated descriptors. */ |
| 773 | struct cryptocop_dma_desc out_cdesc_head = {0}; |
| 774 | struct cryptocop_dma_desc in_cdesc_head = {0}; |
| 775 | |
| 776 | struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head; |
| 777 | struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head; |
| 778 | |
| 779 | struct cryptocop_tfrm_ctx *output_tc = NULL; |
| 780 | void *iop_alloc_ptr; |
| 781 | |
| 782 | assert(operation != NULL); |
| 783 | assert(int_op != NULL); |
| 784 | |
| 785 | DEBUG(printk("cryptocop_setup_dma_list: start\n")); |
| 786 | DEBUG(print_cryptocop_operation(operation)); |
| 787 | |
| 788 | sess = get_session(operation->sid); |
| 789 | if (!sess) { |
| 790 | DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n")); |
| 791 | failed = -EINVAL; |
| 792 | goto error_cleanup; |
| 793 | } |
| 794 | iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag); |
| 795 | if (!iop_alloc_ptr) { |
| 796 | DEBUG_API(printk("cryptocop_setup_dma_list: kmalloc cryptocop_int_operation\n")); |
| 797 | failed = -ENOMEM; |
| 798 | goto error_cleanup; |
| 799 | } |
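| | /* Place the struct inside the padded allocation so that its ctx_out member |
| | * (a dma_descr_context) lands on a 32-byte boundary, presumably as required |
| | * by the DMA hardware; DESCR_ALLOC_PAD bytes of slack make this possible. |
| | */ |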
| 800 | (*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out)); |
| 801 | DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation))); |
| 802 | (*int_op)->alloc_ptr = iop_alloc_ptr; |
| 803 | DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr)); |
| 804 | |
| 805 | (*int_op)->sid = operation->sid; |
| 806 | (*int_op)->cdesc_out = NULL; |
| 807 | (*int_op)->cdesc_in = NULL; |
| 808 | (*int_op)->tdes_mode = cryptocop_3des_ede; |
| 809 | (*int_op)->csum_mode = cryptocop_csum_le; |
| 810 | (*int_op)->ddesc_out = NULL; |
| 811 | (*int_op)->ddesc_in = NULL; |
| 812 | |
| 813 | /* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */ |
| 814 | if (!tcfg) { |
| 815 | DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n")); |
| 816 | failed = -EINVAL; |
| 817 | goto error_cleanup; |
| 818 | } |
| 819 | while (tcfg) { |
| 820 | tctx = get_transform_ctx(sess, tcfg->tid); |
| 821 | if (!tctx) { |
| 822 | DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid)); |
| 823 | failed = -EINVAL; |
| 824 | goto error_cleanup; |
| 825 | } |
| 826 | if (tcfg->inject_ix > operation->tfrm_op.outlen){ |
| 827 | DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen)); |
| 828 | failed = -EINVAL; |
| 829 | goto error_cleanup; |
| 830 | } |
| 831 | switch (tctx->init.alg){ |
| 832 | case cryptocop_alg_mem2mem: |
| 833 | if (cipher_ctx.tcfg != NULL){ |
| 834 | DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n")); |
| 835 | failed = -EINVAL; |
| 836 | goto error_cleanup; |
| 837 | } |
| 838 | /* mem2mem is handled as a NULL cipher. */ |
| 839 | cipher_ctx.cbcmode = 0; |
| 840 | cipher_ctx.decrypt = 0; |
| 841 | cipher_ctx.blocklength = 1; |
| 842 | cipher_ctx.ciph_conf = 0; |
| 843 | cipher_ctx.unit_no = src_dma; |
| 844 | cipher_ctx.tcfg = tcfg; |
| 845 | cipher_ctx.tctx = tctx; |
| 846 | break; |
| 847 | case cryptocop_alg_des: |
| 848 | case cryptocop_alg_3des: |
| 849 | case cryptocop_alg_aes: |
| 850 | /* cipher */ |
| 851 | if (cipher_ctx.tcfg != NULL){ |
| 852 | DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n")); |
| 853 | failed = -EINVAL; |
| 854 | goto error_cleanup; |
| 855 | } |
| 856 | cipher_ctx.tcfg = tcfg; |
| 857 | cipher_ctx.tctx = tctx; |
| 858 | if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){ |
| 859 | cipher_ctx.decrypt = 1; |
| 860 | } |
| 861 | switch (tctx->init.cipher_mode) { |
| 862 | case cryptocop_cipher_mode_ecb: |
| 863 | cipher_ctx.cbcmode = 0; |
| 864 | break; |
| 865 | case cryptocop_cipher_mode_cbc: |
| 866 | cipher_ctx.cbcmode = 1; |
| 867 | break; |
| 868 | default: |
| 869 | DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode)); |
| 870 | failed = -EINVAL; |
| 871 | goto error_cleanup; |
| 872 | } |
| 873 | DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode)); |
| 874 | switch (tctx->init.alg){ |
| 875 | case cryptocop_alg_des: |
| 876 | cipher_ctx.ciph_conf = 0; |
| 877 | cipher_ctx.unit_no = src_des; |
| 878 | cipher_ctx.blocklength = DES_BLOCK_LENGTH; |
| 879 | break; |
| 880 | case cryptocop_alg_3des: |
| 881 | cipher_ctx.ciph_conf = 1; |
| 882 | cipher_ctx.unit_no = src_des; |
| 883 | cipher_ctx.blocklength = DES_BLOCK_LENGTH; |
| 884 | break; |
| 885 | case cryptocop_alg_aes: |
| 886 | cipher_ctx.ciph_conf = 2; |
| 887 | cipher_ctx.unit_no = src_aes; |
| 888 | cipher_ctx.blocklength = AES_BLOCK_LENGTH; |
| 889 | break; |
| 890 | default: |
| 891 | panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg); |
| 892 | } |
| 893 | (*int_op)->tdes_mode = tctx->init.tdes_mode; |
| 894 | break; |
| 895 | case cryptocop_alg_md5: |
| 896 | case cryptocop_alg_sha1: |
| 897 | /* digest */ |
| 898 | if (digest_ctx.tcfg != NULL){ |
| 899 | DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n")); |
| 900 | failed = -EINVAL; |
| 901 | goto error_cleanup; |
| 902 | } |
| 903 | digest_ctx.tcfg = tcfg; |
| 904 | digest_ctx.tctx = tctx; |
| 905 | digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */ |
| 906 | switch (tctx->init.alg){ |
| 907 | case cryptocop_alg_md5: |
| 908 | digest_ctx.blocklength = MD5_BLOCK_LENGTH; |
| 909 | digest_ctx.unit_no = src_md5; |
| 910 | digest_ctx.hash_conf = 1; /* 1 => MD-5 */ |
| 911 | break; |
| 912 | case cryptocop_alg_sha1: |
| 913 | digest_ctx.blocklength = SHA1_BLOCK_LENGTH; |
| 914 | digest_ctx.unit_no = src_sha1; |
| 915 | digest_ctx.hash_conf = 0; /* 0 => SHA-1 */ |
| 916 | break; |
| 917 | default: |
| 918 | panic("cryptocop_setup_dma_list: impossible digest algorithm\n"); |
| 919 | } |
| 920 | break; |
| 921 | case cryptocop_alg_csum: |
| 922 | /* digest */ |
| 923 | if (csum_ctx.tcfg != NULL){ |
| 924 | DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n")); |
| 925 | failed = -EINVAL; |
| 926 | goto error_cleanup; |
| 927 | } |
| 928 | (*int_op)->csum_mode = tctx->init.csum_mode; |
| 929 | csum_ctx.tcfg = tcfg; |
| 930 | csum_ctx.tctx = tctx; |
| 931 | break; |
| 932 | default: |
| 933 | /* no algorithm. */ |
| 934 | DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid)); |
| 935 | failed = -EINVAL; |
| 936 | goto error_cleanup; |
| 937 | } |
| 938 | tcfg = tcfg->next; |
| 939 | } |
| 940 | /* Download key if a cipher is used. */ |
| 941 | if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){ |
| 942 | struct cryptocop_dma_desc *key_desc = NULL; |
| 943 | |
| 944 | failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag); |
| 945 | if (failed) { |
| 946 | DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n")); |
| 947 | goto error_cleanup; |
| 948 | } |
| 949 | current_out_cdesc->next = key_desc; |
| 950 | current_out_cdesc = key_desc; |
| 951 | indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf); |
| 952 | |
| 953 | /* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */ |
| 954 | if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) { |
| 955 | struct cryptocop_dma_desc *iv_desc = NULL; |
| 956 | |
| 957 | DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n")); |
| 958 | |
| 959 | failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag); |
| 960 | if (failed) { |
| 961 | DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n")); |
| 962 | goto error_cleanup; |
| 963 | } |
| 964 | current_out_cdesc->next = iv_desc; |
| 965 | current_out_cdesc = iv_desc; |
| 966 | indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf); |
| 967 | } |
| 968 | } |
| 969 | |
| 970 | /* Process descriptors. */ |
| 971 | odsc = operation->tfrm_op.desc; |
| 972 | while (odsc) { |
| 973 | struct cryptocop_desc_cfg *dcfg = odsc->cfg; |
| 974 | struct strcop_meta_out meta_out = {0}; |
| 975 | size_t desc_len = odsc->length; |
| 976 | int active_count, eop_needed_count; |
| 977 | |
| 978 | output_tc = NULL; |
| 979 | |
| 980 | DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n")); |
| 981 | |
| 982 | while (dcfg) { |
| 983 | struct cryptocop_tfrm_ctx *tc = NULL; |
| 984 | |
| 985 | DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n")); |
| 986 | /* Get the local context for the transform and mark it as the output unit if it produces output. */ |
| 987 | if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){ |
| 988 | tc = &digest_ctx; |
| 989 | } else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){ |
| 990 | tc = &cipher_ctx; |
| 991 | } else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){ |
| 992 | tc = &csum_ctx; |
| 993 | } |
| 994 | if (!tc) { |
| 995 | DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid)); |
| 996 | failed = -EINVAL; |
| 997 | goto error_cleanup; |
| 998 | } |
| 999 | if (tc->done) { |
| 1000 | DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid)); |
| 1001 | failed = -EINVAL; |
| 1002 | goto error_cleanup; |
| 1003 | } |
| 1004 | if (!tc->active) { |
| 1005 | tc->start_ix = indata_ix; |
| 1006 | tc->active = 1; |
| 1007 | } |
| 1008 | |
| 1009 | tc->previous_src = tc->current_src; |
| 1010 | tc->prev_src = tc->curr_src; |
| 1011 | /* Map source unit id to DMA source config. */ |
| 1012 | switch (dcfg->src){ |
| 1013 | case cryptocop_source_dma: |
| 1014 | tc->current_src = src_dma; |
| 1015 | break; |
| 1016 | case cryptocop_source_des: |
| 1017 | tc->current_src = src_des; |
| 1018 | break; |
| 1019 | case cryptocop_source_3des: |
| 1020 | tc->current_src = src_des; |
| 1021 | break; |
| 1022 | case cryptocop_source_aes: |
| 1023 | tc->current_src = src_aes; |
| 1024 | break; |
| 1025 | case cryptocop_source_md5: |
| 1026 | case cryptocop_source_sha1: |
| 1027 | case cryptocop_source_csum: |
| 1028 | case cryptocop_source_none: |
| 1029 | default: |
| 1030 | /* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units. |
| 1031 | */ |
| 1032 | DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src)); |
| 1033 | failed = -EINVAL; |
| 1034 | goto error_cleanup; |
| 1035 | } |
| 1036 | if (tc->current_src != src_dma) { |
| 1037 | /* Find the unit we are sourcing from. */ |
| 1038 | if (digest_ctx.unit_no == tc->current_src){ |
| 1039 | tc->curr_src = &digest_ctx; |
| 1040 | } else if (cipher_ctx.unit_no == tc->current_src){ |
| 1041 | tc->curr_src = &cipher_ctx; |
| 1042 | } else if (csum_ctx.unit_no == tc->current_src){ |
| 1043 | tc->curr_src = &csum_ctx; |
| 1044 | } |
| 1045 | if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){ |
| 1046 | DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no)); |
| 1047 | failed = -EINVAL; |
| 1048 | goto error_cleanup; |
| 1049 | } |
| 1050 | } else { |
| 1051 | tc->curr_src = NULL; |
| 1052 | } |
| 1053 | |
| 1054 | /* Detect source switch. */ |
| 1055 | DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_src=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src)); |
| 1056 | if (tc->active && (tc->current_src != tc->previous_src)) { |
| 1057 | /* Only allow source switch when both the old source unit and the new one have |
| 1058 | * no pending data to process (i.e. the consumed length must be a multiple of the |
| 1059 | * transform blocklength). */ |
| 1060 | /* Note: if the src == NULL we are actually sourcing from DMA out. */ |
| 1061 | if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) || |
| 1062 | ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength))) |
| 1063 | { |
| 1064 | DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN)); |
| 1065 | failed = -EINVAL; |
| 1066 | goto error_cleanup; |
| 1067 | } |
| 1068 | } |
| 1069 | /* Detect unit deactivation. */ |
| 1070 | if (dcfg->last) { |
| 1071 | /* Length check of this is handled below. */ |
| 1072 | tc->done = 1; |
| 1073 | } |
| 1074 | dcfg = dcfg->next; |
| 1075 | } /* while (dcfg) */ |
| 1076 | DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n")); |
| 1077 | |
| 1078 | if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){ |
| 1079 | DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no)); |
| 1080 | failed = -EINVAL; |
| 1081 | goto error_cleanup; |
| 1082 | } |
| 1083 | if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){ |
| 1084 | DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no)); |
| 1085 | failed = -EINVAL; |
| 1086 | goto error_cleanup; |
| 1087 | } |
| 1088 | if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){ |
| 1089 | DEBUG_API(printk("cryptocop_setup_dma_list: csum source from inactive unit %d\n", csum_ctx.curr_src->unit_no)); |
| 1090 | failed = -EINVAL; |
| 1091 | goto error_cleanup; |
| 1092 | } |
| 1093 | |
| 1094 | /* Update consumed and produced lengths. |
| 1095 | |
| 1096 | The consumed length accounting here is actually cheating. If a unit sources from DMA (or any |
| 1097 | other unit that processes data in blocks of one octet) it is correct, but if it sources from a |
| 1098 | block processing unit, i.e. a cipher, it will be temporarily incorrect at some points. However, |
| 1099 | since the HW only allows changing source to or from a block processing unit at times where that |
| 1100 | unit has processed an exact multiple of its block length, the end result will be correct. |
| 1101 | Beware that if the source change restriction changes, this code will need to be (much) reworked. |
| 1102 | */ |
| 1103 | DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len)); |
| 1104 | |
| 1105 | if (csum_ctx.active) { |
| 1106 | csum_ctx.consumed += desc_len; |
| 1107 | if (csum_ctx.done) { |
| 1108 | csum_ctx.produced = 2; |
| 1109 | } |
| 1110 | DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength)); |
| 1111 | } |
| 1112 | if (digest_ctx.active) { |
| 1113 | digest_ctx.consumed += desc_len; |
| 1114 | if (digest_ctx.done) { |
| 1115 | if (digest_ctx.unit_no == src_md5) { |
| 1116 | digest_ctx.produced = MD5_STATE_LENGTH; |
| 1117 | } else { |
| 1118 | digest_ctx.produced = SHA1_STATE_LENGTH; |
| 1119 | } |
| 1120 | } |
| 1121 | DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength)); |
| 1122 | } |
| 1123 | if (cipher_ctx.active) { |
| 1124 | /* Ciphers are allowed only to source from DMA out. That is filtered above. */ |
| 1125 | assert(cipher_ctx.current_src == src_dma); |
| 1126 | cipher_ctx.consumed += desc_len; |
| 1127 | cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength); |
| 1128 | if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){ |
| 1129 | cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC iv. */ |
| 1130 | } |
| 1131 | DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength)); |
| 1132 | } |
| 1133 | |
| 1134 | /* Setup the DMA out descriptors. */ |
| 1135 | /* Configure the metadata. */ |
| 1136 | active_count = 0; |
| 1137 | eop_needed_count = 0; |
| 1138 | if (cipher_ctx.active) { |
| 1139 | ++active_count; |
| 1140 | if (cipher_ctx.unit_no == src_dma){ |
| 1141 | /* mem2mem */ |
| 1142 | meta_out.ciphsel = src_none; |
| 1143 | } else { |
| 1144 | meta_out.ciphsel = cipher_ctx.current_src; |
| 1145 | } |
| 1146 | meta_out.ciphconf = cipher_ctx.ciph_conf; |
| 1147 | meta_out.cbcmode = cipher_ctx.cbcmode; |
| 1148 | meta_out.decrypt = cipher_ctx.decrypt; |
| 1149 | DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt)); |
| 1150 | if (cipher_ctx.done) ++eop_needed_count; |
| 1151 | } else { |
| 1152 | meta_out.ciphsel = src_none; |
| 1153 | } |
| 1154 | |
| 1155 | if (digest_ctx.active) { |
| 1156 | ++active_count; |
| 1157 | meta_out.hashsel = digest_ctx.current_src; |
| 1158 | meta_out.hashconf = digest_ctx.hash_conf; |
| 1159 | meta_out.hashmode = 0; /* Explicit mode is not used here. */ |
| 1160 | DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode)); |
| 1161 | if (digest_ctx.done) { |
| 1162 | assert(digest_ctx.pad_descs == NULL); |
| 1163 | failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag); |
| 1164 | if (failed) { |
| 1165 | DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n")); |
| 1166 | goto error_cleanup; |
| 1167 | } |
| 1168 | } |
| 1169 | } else { |
| 1170 | meta_out.hashsel = src_none; |
| 1171 | } |
| 1172 | |
| 1173 | if (csum_ctx.active) { |
| 1174 | ++active_count; |
| 1175 | meta_out.csumsel = csum_ctx.current_src; |
| 1176 | if (csum_ctx.done) { |
| 1177 | assert(csum_ctx.pad_descs == NULL); |
| 1178 | failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag); |
| 1179 | if (failed) { |
| 1180 | DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n")); |
| 1181 | goto error_cleanup; |
| 1182 | } |
| 1183 | } |
| 1184 | } else { |
| 1185 | meta_out.csumsel = src_none; |
| 1186 | } |
| 1187 | DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count)); |
| 1188 | /* Setup DMA out descriptors for the indata. */ |
| 1189 | failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, ¤t_out_cdesc, &meta_out, alloc_flag); |
| 1190 | if (failed) { |
| 1191 | DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed)); |
| 1192 | goto error_cleanup; |
| 1193 | } |
| 1194 | /* Setup out EOP. If there are active units that are not done here they cannot get an EOP, |
| 1195 | * so we must set up a zero length descriptor to DMA to signal EOP only to the done units. |
| 1196 | * If there is a pad descriptor, the padded unit will get its EOP from that descriptor. |
| 1197 | */ |
| 1198 | assert(active_count >= eop_needed_count); |
| 1199 | assert((eop_needed_count == 0) || (eop_needed_count == 1)); |
| 1200 | if (eop_needed_count) { |
| 1201 | /* This means that the bulk operation (cipher/m2m) is terminated. */ |
| 1202 | if (active_count > 1) { |
| 1203 | /* Use zero length EOP descriptor. */ |
| 1204 | struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag); |
| 1205 | struct strcop_meta_out ed_mo = {0}; |
| 1206 | if (!ed) { |
| 1207 | DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n")); |
| 1208 | failed = -ENOMEM; |
| 1209 | goto error_cleanup; |
| 1210 | } |
| 1211 | |
| 1212 | assert(cipher_ctx.active && cipher_ctx.done); |
| 1213 | |
| 1214 | if (cipher_ctx.unit_no == src_dma){ |
| 1215 | /* mem2mem */ |
| 1216 | ed_mo.ciphsel = src_none; |
| 1217 | } else { |
| 1218 | ed_mo.ciphsel = cipher_ctx.current_src; |
| 1219 | } |
| 1220 | ed_mo.ciphconf = cipher_ctx.ciph_conf; |
| 1221 | ed_mo.cbcmode = cipher_ctx.cbcmode; |
| 1222 | ed_mo.decrypt = cipher_ctx.decrypt; |
| 1223 | |
| 1224 | ed->free_buf = NULL; |
| 1225 | ed->dma_descr->wait = 1; |
| 1226 | ed->dma_descr->out_eop = 1; |
| 1227 | |
| 1228 | ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */ |
| 1229 | ed->dma_descr->after = ed->dma_descr->buf; |
| 1230 | ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo); |
| 1231 | current_out_cdesc->next = ed; |
| 1232 | current_out_cdesc = ed; |
| 1233 | } else { |
| 1234 | /* Set EOP in the current out descriptor since the only active module is |
| 1235 | * the one needing the EOP. */ |
| 1236 | |
| 1237 | current_out_cdesc->dma_descr->out_eop = 1; |
| 1238 | } |
| 1239 | } |
| 1240 | |
| 1241 | if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0; |
| 1242 | if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0; |
| 1243 | if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0; |
| 1244 | indata_ix += odsc->length; |
| 1245 | odsc = odsc->next; |
| 1246 | } /* while (odsc) */ /* Process descriptors. */ |
| 1247 | DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n")); |
| 1248 | if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){ |
| 1249 | DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n")); |
| 1250 | failed = -EINVAL; |
| 1251 | goto error_cleanup; |
| 1252 | } |
| 1253 | if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){ |
| 1254 | DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n")); |
| 1255 | failed = -EINVAL; |
| 1256 | goto error_cleanup; |
| 1257 | } |
| 1258 | if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){ |
| 1259 | DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n")); |
| 1260 | failed = -EINVAL; |
| 1261 | goto error_cleanup; |
| 1262 | } |
| 1263 | |
| 1264 | failed = append_input_descriptors(operation, ¤t_in_cdesc, ¤t_out_cdesc, &cipher_ctx, alloc_flag); |
| 1265 | if (failed){ |
| 1266 | DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed)); |
| 1267 | goto error_cleanup; |
| 1268 | } |
| 1269 | failed = append_input_descriptors(operation, ¤t_in_cdesc, ¤t_out_cdesc, &digest_ctx, alloc_flag); |
| 1270 | if (failed){ |
| 1271 | DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors digest_ctx %d\n", failed)); |
| 1272 | goto error_cleanup; |
| 1273 | } |
| 1274 | failed = append_input_descriptors(operation, ¤t_in_cdesc, ¤t_out_cdesc, &csum_ctx, alloc_flag); |
| 1275 | if (failed){ |
| 1276 | DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors csum_ctx %d\n", failed)); |
| 1277 | goto error_cleanup; |
| 1278 | } |
| 1279 | |
| 1280 | DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op)); |
| 1281 | (*int_op)->cdesc_out = out_cdesc_head.next; |
| 1282 | (*int_op)->cdesc_in = in_cdesc_head.next; |
| 1283 | DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in)); |
| 1284 | |
| 1285 | setup_descr_chain(out_cdesc_head.next); |
| 1286 | setup_descr_chain(in_cdesc_head.next); |
| 1287 | |
| 1288 | /* Last but not least: mark the last DMA in descriptor for INTR and EOL, and the |
| 1289 | * last DMA out descriptor for EOL. |
| 1290 | */ |
| 1291 | current_in_cdesc->dma_descr->intr = 1; |
| 1292 | current_in_cdesc->dma_descr->eol = 1; |
| 1293 | current_out_cdesc->dma_descr->eol = 1; |
| 1294 | |
| 1295 | /* Setup DMA contexts. */ |
| 1296 | (*int_op)->ctx_out.next = NULL; |
| 1297 | (*int_op)->ctx_out.eol = 1; |
| 1298 | (*int_op)->ctx_out.intr = 0; |
| 1299 | (*int_op)->ctx_out.store_mode = 0; |
| 1300 | (*int_op)->ctx_out.en = 0; |
| 1301 | (*int_op)->ctx_out.dis = 0; |
| 1302 | (*int_op)->ctx_out.md0 = 0; |
| 1303 | (*int_op)->ctx_out.md1 = 0; |
| 1304 | (*int_op)->ctx_out.md2 = 0; |
| 1305 | (*int_op)->ctx_out.md3 = 0; |
| 1306 | (*int_op)->ctx_out.md4 = 0; |
| 1307 | (*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr); |
| 1308 | (*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already physical address. */ |
| 1309 | |
| 1310 | (*int_op)->ctx_in.next = NULL; |
| 1311 | (*int_op)->ctx_in.eol = 1; |
| 1312 | (*int_op)->ctx_in.intr = 0; |
| 1313 | (*int_op)->ctx_in.store_mode = 0; |
| 1314 | (*int_op)->ctx_in.en = 0; |
| 1315 | (*int_op)->ctx_in.dis = 0; |
| 1316 | (*int_op)->ctx_in.md0 = 0; |
| 1317 | (*int_op)->ctx_in.md1 = 0; |
| 1318 | (*int_op)->ctx_in.md2 = 0; |
| 1319 | (*int_op)->ctx_in.md3 = 0; |
| 1320 | (*int_op)->ctx_in.md4 = 0; |
| 1321 | |
| 1322 | (*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr); |
| 1323 | (*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already physical address. */ |
| 1324 | |
| 1325 | DEBUG(printk("cryptocop_setup_dma_list: done\n")); |
| 1326 | return 0; |
| 1327 | |
| 1328 | error_cleanup: |
| 1329 | { |
| 1330 | /* Free all allocated resources. */ |
| 1331 | struct cryptocop_dma_desc *tmp_cdesc; |
| 1332 | while (digest_ctx.pad_descs){ |
| 1333 | tmp_cdesc = digest_ctx.pad_descs->next; |
| 1334 | free_cdesc(digest_ctx.pad_descs); |
| 1335 | digest_ctx.pad_descs = tmp_cdesc; |
| 1336 | } |
| 1337 | while (csum_ctx.pad_descs){ |
| 1338 | tmp_cdesc = csum_ctx.pad_descs->next; |
| 1339 | free_cdesc(csum_ctx.pad_descs); |
| 1340 | csum_ctx.pad_descs = tmp_cdesc; |
| 1341 | } |
| 1342 | assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */ |
| 1343 | |
| 1344 | if (*int_op != NULL) delete_internal_operation(*int_op); |
| 1345 | } |
| 1346 | DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed)); |
| 1347 | return failed; |
| 1348 | } |
| 1349 | |
| 1350 | |
| 1351 | static void delete_internal_operation(struct cryptocop_int_operation *iop) |
| 1352 | { |
| 1353 | void *ptr = iop->alloc_ptr; |
| 1354 | struct cryptocop_dma_desc *cd = iop->cdesc_out; |
| 1355 | struct cryptocop_dma_desc *next; |
| 1356 | |
| 1357 | DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr)); |
| 1358 | |
| 1359 | while (cd) { |
| 1360 | next = cd->next; |
| 1361 | free_cdesc(cd); |
| 1362 | cd = next; |
| 1363 | } |
| 1364 | cd = iop->cdesc_in; |
| 1365 | while (cd) { |
| 1366 | next = cd->next; |
| 1367 | free_cdesc(cd); |
| 1368 | cd = next; |
| 1369 | } |
| 1370 | kfree(ptr); |
| 1371 | } |
| 1372 | |
| 1373 | #define MD5_MIN_PAD_LENGTH (9) |
| 1374 | #define MD5_PAD_LENGTH_FIELD_LENGTH (8) |
| 1375 | |
| 1376 | static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length) |
| 1377 | { |
| 1378 | size_t padlen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH); |
| 1379 | unsigned char *p; |
| 1380 | int i; |
| 1381 | unsigned long long int bit_length = hashed_length << 3; |
| 1382 | |
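|      | /* The pad must be able to hold at least the 0x80 marker byte plus the 8 byte length field. */ |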
| 1383 | if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH; |
| 1384 | |
| 1385 | p = kmalloc(padlen, alloc_flag); |
| 1386 | if (!p) return -ENOMEM; |
| 1387 | |
| 1388 | *p = 0x80; |
| 1389 | memset(p+1, 0, padlen - 1); |
| 1390 | |
| 1391 | DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length)); |
| 1392 | |
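|      | /* Store the message length in bits as a little-endian value in the trailing 8 byte length field. */ |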
| 1393 | i = padlen - MD5_PAD_LENGTH_FIELD_LENGTH; |
| 1394 | while (bit_length != 0){ |
| 1395 | p[i++] = bit_length % 0x100; |
| 1396 | bit_length >>= 8; |
| 1397 | } |
| 1398 | |
| 1399 | *pad = (char*)p; |
| 1400 | *pad_length = padlen; |
| 1401 | |
| 1402 | return 0; |
| 1403 | } |
| 1404 | |
| 1405 | #define SHA1_MIN_PAD_LENGTH (9) |
| 1406 | #define SHA1_PAD_LENGTH_FIELD_LENGTH (8) |
| 1407 | |
| 1408 | static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length) |
| 1409 | { |
| 1410 | size_t padlen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH); |
| 1411 | unsigned char *p; |
| 1412 | int i; |
| 1413 | unsigned long long int bit_length = hashed_length << 3; |
| 1414 | |
| 1415 | if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH; |
| 1416 | |
| 1417 | p = kmalloc(padlen, alloc_flag); |
| 1418 | if (!p) return -ENOMEM; |
| 1419 | |
| 1420 | *p = 0x80; |
| 1421 | memset(p+1, 0, padlen - 1); |
| 1422 | |
| 1423 | DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length)); |
| 1424 | |
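|      | /* SHA-1 stores the message length big-endian; write the low byte at the end of the pad and work backwards. */ |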
| 1425 | i = padlen - 1; |
| 1426 | while (bit_length != 0){ |
| 1427 | p[i--] = bit_length % 0x100; |
| 1428 | bit_length >>= 8; |
| 1429 | } |
| 1430 | |
| 1431 | *pad = (char*)p; |
| 1432 | *pad_length = padlen; |
| 1433 | |
| 1434 | return 0; |
| 1435 | } |
| 1436 | |
| 1437 | |
| 1438 | static int transform_ok(struct cryptocop_transform_init *tinit) |
| 1439 | { |
| 1440 | switch (tinit->alg){ |
| 1441 | case cryptocop_alg_csum: |
| 1442 | switch (tinit->csum_mode){ |
| 1443 | case cryptocop_csum_le: |
| 1444 | case cryptocop_csum_be: |
| 1445 | break; |
| 1446 | default: |
| 1447 | DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n")); |
| 1448 | return -EINVAL; |
| 1449 | } |
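|      | /* Fall through: checksum transforms share the zero key length check below. */ |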
| 1450 | case cryptocop_alg_mem2mem: |
| 1451 | case cryptocop_alg_md5: |
| 1452 | case cryptocop_alg_sha1: |
| 1453 | if (tinit->keylen != 0) { |
| 1454 | DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen)); |
| 1455 | return -EINVAL; /* This check is a bit strict. */ |
| 1456 | } |
| 1457 | break; |
| 1458 | case cryptocop_alg_des: |
| 1459 | if (tinit->keylen != 64) { |
| 1460 | DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen)); |
| 1461 | return -EINVAL; |
| 1462 | } |
| 1463 | break; |
| 1464 | case cryptocop_alg_3des: |
| 1465 | if (tinit->keylen != 192) { |
| 1466 | DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen)); |
| 1467 | return -EINVAL; |
| 1468 | } |
| 1469 | break; |
| 1470 | case cryptocop_alg_aes: |
| 1471 | if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) { |
| 1472 | DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen)); |
| 1473 | return -EINVAL; |
| 1474 | } |
| 1475 | break; |
| 1476 | case cryptocop_no_alg: |
| 1477 | default: |
| 1478 | DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg)); |
| 1479 | return -EINVAL; |
| 1480 | } |
| 1481 | |
| 1482 | switch (tinit->alg){ |
| 1483 | case cryptocop_alg_des: |
| 1484 | case cryptocop_alg_3des: |
| 1485 | case cryptocop_alg_aes: |
| 1486 | if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL; |
| 1487 | default: |
| 1488 | break; |
| 1489 | } |
| 1490 | return 0; |
| 1491 | } |
| 1492 | |
| 1493 | |
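|      | /* Minimal usage sketch for cryptocop_new_session (illustrative only; the field |
|      |  * values below are assumptions, and a real caller would also fill in the key |
|      |  * material and check the returned error code): |
|      |  * |
|      |  *   struct cryptocop_transform_init tinit = {0}; |
|      |  *   cryptocop_session_id sid; |
|      |  * |
|      |  *   tinit.tid = 1; |
|      |  *   tinit.alg = cryptocop_alg_aes; |
|      |  *   tinit.keylen = 128; |
|      |  *   tinit.cipher_mode = cryptocop_cipher_mode_cbc; |
|      |  *   tinit.next = NULL; |
|      |  *   if (cryptocop_new_session(&sid, &tinit, GFP_KERNEL) != 0) |
|      |  *           return -1; |
|      |  */ |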
| 1494 | int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag) |
| 1495 | { |
| 1496 | struct cryptocop_session *sess; |
| 1497 | struct cryptocop_transform_init *tfrm_in = tinit; |
| 1498 | struct cryptocop_transform_init *tmp_in; |
| 1499 | int no_tfrms = 0; |
| 1500 | int i; |
| 1501 | unsigned long int flags; |
| 1502 | |
| 1503 | init_stream_coprocessor(); /* For safety if we are called early */ |
| 1504 | |
| 1505 | while (tfrm_in){ |
| 1506 | int err; |
| 1507 | ++no_tfrms; |
| 1508 | if ((err = transform_ok(tfrm_in))) { |
| 1509 | DEBUG_API(printk("cryptocop_new_session, bad transform\n")); |
| 1510 | return err; |
| 1511 | } |
| 1512 | tfrm_in = tfrm_in->next; |
| 1513 | } |
| 1514 | if (0 == no_tfrms) { |
| 1515 | DEBUG_API(printk("cryptocop_new_session, no transforms specified\n")); |
| 1516 | return -EINVAL; |
| 1517 | } |
| 1518 | |
| 1519 | sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag); |
| 1520 | if (!sess){ |
| 1521 | DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n")); |
| 1522 | return -ENOMEM; |
| 1523 | } |
| 1524 | |
| 1525 | sess->tfrm_ctx = kmalloc(no_tfrms * sizeof(struct cryptocop_transform_ctx), alloc_flag); |
| 1526 | if (!sess->tfrm_ctx) { |
| 1527 | DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n")); |
| 1528 | kfree(sess); |
| 1529 | return -ENOMEM; |
| 1530 | } |
| 1531 | |
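|      | /* Copy each transform init into the session's transform context array, rejecting duplicate transform ids. */ |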
| 1532 | tfrm_in = tinit; |
| 1533 | for (i = 0; i < no_tfrms; i++){ |
| 1534 | tmp_in = tfrm_in->next; |
| 1535 | while (tmp_in){ |
| 1536 | if (tmp_in->tid == tfrm_in->tid) { |
| 1537 | DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n")); |
| 1538 | kfree(sess->tfrm_ctx); |
| 1539 | kfree(sess); |
| 1540 | return -EINVAL; |
| 1541 | } |
| 1542 | tmp_in = tmp_in->next; |
| 1543 | } |
| 1544 | memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init)); |
| 1545 | sess->tfrm_ctx[i].dec_key_set = 0; |
| 1546 | sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1; |
| 1547 | |
| 1548 | tfrm_in = tfrm_in->next; |
| 1549 | } |
| 1550 | sess->tfrm_ctx[i-1].next = NULL; |
| 1551 | |
| 1552 | spin_lock_irqsave(&cryptocop_sessions_lock, flags); |
| 1553 | sess->sid = next_sid; |
| 1554 | next_sid++; |
| 1555 | /* TODO If we are really paranoid we should do a duplicate check to handle sid wraparound. |
| 1556 | * OTOH 2^64 is a really large number of sessions. */ |
| 1557 | if (next_sid == 0) next_sid = 1; |
| 1558 | |
| 1559 | /* Prepend to session list. */ |
| 1560 | sess->next = cryptocop_sessions; |
| 1561 | cryptocop_sessions = sess; |
| 1562 | spin_unlock_irqrestore(&cryptocop_sessions_lock, flags); |
| 1563 | *sid = sess->sid; |
| 1564 | return 0; |
| 1565 | } |
| 1566 | |
| 1567 | |
| 1568 | int cryptocop_free_session(cryptocop_session_id sid) |
| 1569 | { |
| 1570 | struct cryptocop_transform_ctx *tc; |
| 1571 | struct cryptocop_session *sess = NULL; |
| 1572 | struct cryptocop_session *psess = NULL; |
| 1573 | unsigned long int flags; |
| 1574 | int i; |
| 1575 | LIST_HEAD(remove_list); |
| 1576 | struct list_head *node, *tmp; |
| 1577 | struct cryptocop_prio_job *pj; |
| 1578 | |
| 1579 | DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid)); |
| 1580 | |
| 1581 | spin_lock_irqsave(&cryptocop_sessions_lock, flags); |
| 1582 | sess = cryptocop_sessions; |
| 1583 | while (sess && sess->sid != sid){ |
| 1584 | psess = sess; |
| 1585 | sess = sess->next; |
| 1586 | } |
| 1587 | if (sess){ |
| 1588 | if (psess){ |
| 1589 | psess->next = sess->next; |
| 1590 | } else { |
| 1591 | cryptocop_sessions = sess->next; |
| 1592 | } |
| 1593 | } |
| 1594 | spin_unlock_irqrestore(&cryptocop_sessions_lock, flags); |
| 1595 | |
| 1596 | if (!sess) return -EINVAL; |
| 1597 | |
| 1598 | /* Remove queued jobs. */ |
| 1599 | spin_lock_irqsave(&cryptocop_job_queue_lock, flags); |
| 1600 | |
| 1601 | for (i = 0; i < cryptocop_prio_no_prios; i++){ |
| 1602 | if (!list_empty(&(cryptocop_job_queues[i].jobs))){ |
| 1603 | list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) { |
| 1604 | pj = list_entry(node, struct cryptocop_prio_job, node); |
| 1605 | if (pj->oper->sid == sid) { |
| 1606 | list_move_tail(node, &remove_list); |
| 1607 | } |
| 1608 | } |
| 1609 | } |
| 1610 | } |
| 1611 | spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); |
| 1612 | |
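|      | /* Notify consumers of the removed jobs and free them outside the queue lock. */ |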
| 1613 | list_for_each_safe(node, tmp, &remove_list) { |
| 1614 | list_del(node); |
| 1615 | pj = list_entry(node, struct cryptocop_prio_job, node); |
| 1616 | pj->oper->operation_status = -EAGAIN; /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */ |
| 1617 | DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop)); |
| 1618 | pj->oper->cb(pj->oper, pj->oper->cb_data); |
| 1619 | delete_internal_operation(pj->iop); |
| 1620 | kfree(pj); |
| 1621 | } |
| 1622 | |
| 1623 | tc = sess->tfrm_ctx; |
| 1624 | /* Erase keying data. */ |
| 1625 | while (tc){ |
| 1626 | DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid)); |
| 1627 | memset(tc->init.key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH); |
| 1628 | memset(tc->dec_key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH); |
| 1629 | tc = tc->next; |
| 1630 | } |
| 1631 | kfree(sess->tfrm_ctx); |
| 1632 | kfree(sess); |
| 1633 | |
| 1634 | return 0; |
| 1635 | } |
| 1636 | |
| 1637 | static struct cryptocop_session *get_session(cryptocop_session_id sid) |
| 1638 | { |
| 1639 | struct cryptocop_session *sess; |
| 1640 | unsigned long int flags; |
| 1641 | |
| 1642 | spin_lock_irqsave(&cryptocop_sessions_lock, flags); |
| 1643 | sess = cryptocop_sessions; |
| 1644 | while (sess && (sess->sid != sid)){ |
| 1645 | sess = sess->next; |
| 1646 | } |
| 1647 | spin_unlock_irqrestore(&cryptocop_sessions_lock, flags); |
| 1648 | |
| 1649 | return sess; |
| 1650 | } |
| 1651 | |
| 1652 | static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid) |
| 1653 | { |
| 1654 | struct cryptocop_transform_ctx *tc = sess->tfrm_ctx; |
| 1655 | |
| 1656 | DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid)); |
| 1657 | assert(sess != NULL); |
| 1658 | while (tc && tc->init.tid != tid){ |
| 1659 | DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next)); |
| 1660 | tc = tc->next; |
| 1661 | } |
| 1662 | DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc)); |
| 1663 | return tc; |
| 1664 | } |
| 1665 | |
| 1666 | |
| 1667 | |
| 1668 | /* The AES s-transform matrix (s-box). */ |
| 1669 | static const u8 aes_sbox[256] = { |
| 1670 | 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118, |
| 1671 | 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192, |
| 1672 | 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21, |
| 1673 | 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117, |
| 1674 | 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, |
| 1675 | 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, |
| 1676 | 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, |
| 1677 | 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210, |
| 1678 | 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115, |
| 1679 | 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219, |
| 1680 | 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121, |
| 1681 | 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, |
| 1682 | 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, |
| 1683 | 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, |
| 1684 | 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, |
| 1685 | 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22 |
| 1686 | }; |
| 1687 | |
| 1688 | /* AES has a 32 bit round constant for each round in the |
| 1689 | * key schedule. round_constant[i] is really Rcon[i+1] in FIPS-197. |
| 1690 | */ |
| 1691 | static u32 round_constant[11] = { |
| 1692 | 0x01000000, 0x02000000, 0x04000000, 0x08000000, |
| 1693 | 0x10000000, 0x20000000, 0x40000000, 0x80000000, |
| 1694 | 0x1B000000, 0x36000000, 0x6C000000 |
| 1695 | }; |
| 1696 | |
| 1697 | /* Apply the s-box to each of the four octets in w. */ |
| 1698 | static u32 aes_ks_subword(const u32 w) |
| 1699 | { |
| 1700 | u8 bytes[4]; |
| 1701 | |
| 1702 | *(u32*)(&bytes[0]) = w; |
| 1703 | bytes[0] = aes_sbox[bytes[0]]; |
| 1704 | bytes[1] = aes_sbox[bytes[1]]; |
| 1705 | bytes[2] = aes_sbox[bytes[2]]; |
| 1706 | bytes[3] = aes_sbox[bytes[3]]; |
| 1707 | return *(u32*)(&bytes[0]); |
| 1708 | } |
| 1709 | |
| 1710 | /* The encrypt (forward) Rijndael key schedule algorithm pseudo code: |
| 1711 | * (Note that AES words are 32 bit long) |
| 1712 | * |
| 1713 | * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){ |
| 1714 | * word temp |
| 1715 | * i = 0 |
| 1716 | * while (i < Nk) { |
| 1717 | * w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3]) |
| 1718 | * i = i + 1 |
| 1719 | * } |
| 1720 | * i = Nk |
| 1721 | * |
| 1722 | * while (i < (Nb * (Nr + 1))) { |
| 1723 | * temp = w[i - 1] |
| 1724 | * if ((i mod Nk) == 0) { |
| 1725 | * temp = SubWord(RotWord(temp)) xor Rcon[i/Nk] |
| 1726 | * } |
| 1727 | * else if ((Nk > 6) && ((i mod Nk) == 4)) { |
| 1728 | * temp = SubWord(temp) |
| 1729 | * } |
| 1730 | * w[i] = w[i - Nk] xor temp |
|      | * i = i + 1 |
| 1731 | * } |
| 1732 | * RotWord(t) does an 8 bit cyclic shift left on a 32 bit word. |
| 1733 | * SubWord(t) applies the AES s-box individually to each octet |
| 1734 | * in a 32 bit word. |
| 1735 | * |
| 1736 | * For AES Nk can have the values 4, 6, and 8 (corresponding to |
| 1737 | * values for Nr of 10, 12, and 14). Nb is always 4. |
| 1738 | * |
| 1739 | * To construct w[i], w[i - 1] and w[i - Nk] must be |
| 1740 | * available. Consequently we must keep a state of the last Nk words |
| 1741 | * to be able to create the last round keys. |
| 1742 | */ |
| 1743 | static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength) |
| 1744 | { |
| 1745 | u32 temp; |
| 1746 | u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */ |
| 1747 | u8 w_last_ix; |
| 1748 | int i; |
| 1749 | u8 nr, nk; |
| 1750 | |
| 1751 | switch (keylength){ |
| 1752 | case 128: |
| 1753 | nk = 4; |
| 1754 | nr = 10; |
| 1755 | break; |
| 1756 | case 192: |
| 1757 | nk = 6; |
| 1758 | nr = 12; |
| 1759 | break; |
| 1760 | case 256: |
| 1761 | nk = 8; |
| 1762 | nr = 14; |
| 1763 | break; |
| 1764 | default: |
| 1765 | panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n"); |
| 1766 | }; |
| 1767 | |
| 1768 | /* Need to do host byte order correction here since the key is byte oriented and the |
| 1769 | * key expansion algorithm is word (u32) oriented. */ |
| 1770 | for (i = 0; i < nk; i+=1) { |
| 1771 | w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]); |
| 1772 | } |
| 1773 | |
| 1774 | i = (int)nk; |
| 1775 | w_last_ix = i - 1; |
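|      | /* Expand the key schedule one 32 bit word at a time, keeping only the last nk words in the ring buffer. */ |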
| 1776 | while (i < (4 * (nr + 2))) { |
| 1777 | temp = w_ring[w_last_ix]; |
| 1778 | if (!(i % nk)) { |
| 1779 | /* RotWord(temp) */ |
| 1780 | temp = (temp << 8) | (temp >> 24); |
| 1781 | temp = aes_ks_subword(temp); |
| 1782 | temp ^= round_constant[i/nk - 1]; |
| 1783 | } else if ((nk > 6) && ((i % nk) == 4)) { |
| 1784 | temp = aes_ks_subword(temp); |
| 1785 | } |
| 1786 | w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */ |
| 1787 | temp ^= w_ring[w_last_ix]; |
| 1788 | w_ring[w_last_ix] = temp; |
| 1789 | |
| 1790 | /* We need the round keys for round Nr+1 and Nr+2 (round key |
| 1791 | * Nr+2 is the round key beyond the last one used when |
| 1792 | * encrypting). Rounds are numbered starting from 0, Nr=10 |
| 1793 | * implies 11 rounds are used in encryption/decryption. |
| 1794 | */ |
| 1795 | if (i >= (4 * nr)) { |
| 1796 | /* Need to do host byte order correction here, the key |
| 1797 | * is byte oriented. */ |
| 1798 | *(u32*)dec_key = cpu_to_be32(temp); |
| 1799 | dec_key += 4; |
| 1800 | } |
| 1801 | ++i; |
| 1802 | } |
| 1803 | } |
| 1804 | |
| 1805 | |
| 1806 | /**** Job/operation management. ****/ |
| 1807 | |
| 1808 | int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation) |
| 1809 | { |
| 1810 | return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation); |
| 1811 | } |
| 1812 | |
| 1813 | int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation) |
| 1814 | { |
| 1815 | return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation); |
| 1816 | } |
| 1817 | |
| 1818 | int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation) |
| 1819 | { |
| 1820 | return cryptocop_job_queue_insert(cryptocop_prio_user, operation); |
| 1821 | } |
| 1822 | |
| 1823 | static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation) |
| 1824 | { |
| 1825 | int ret; |
| 1826 | struct cryptocop_prio_job *pj = NULL; |
| 1827 | unsigned long int flags; |
| 1828 | |
| 1829 | DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation)); |
| 1830 | |
| 1831 | if (!operation || !operation->cb){ |
| 1832 | DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation)); |
| 1833 | return -EINVAL; |
| 1834 | } |
| 1835 | |
| 1836 | if ((ret = cryptocop_job_setup(&pj, operation)) != 0){ |
| 1837 | DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n")); |
| 1838 | return ret; |
| 1839 | } |
| 1840 | assert(pj != NULL); |
| 1841 | |
| 1842 | spin_lock_irqsave(&cryptocop_job_queue_lock, flags); |
| 1843 | list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs); |
| 1844 | spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); |
| 1845 | |
| 1846 | /* Make sure a job is running */ |
| 1847 | cryptocop_start_job(); |
| 1848 | return 0; |
| 1849 | } |
| 1850 | |
| 1851 | static void cryptocop_do_tasklet(unsigned long unused); |
| 1852 | DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0); |
| 1853 | |
| 1854 | static void cryptocop_do_tasklet(unsigned long unused) |
| 1855 | { |
| 1856 | struct list_head *node; |
| 1857 | struct cryptocop_prio_job *pj = NULL; |
| 1858 | unsigned long flags; |
| 1859 | |
| 1860 | DEBUG(printk("cryptocop_do_tasklet: entering\n")); |
| 1861 | |
| 1862 | do { |
| 1863 | spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags); |
| 1864 | if (!list_empty(&cryptocop_completed_jobs)){ |
| 1865 | node = cryptocop_completed_jobs.next; |
| 1866 | list_del(node); |
| 1867 | pj = list_entry(node, struct cryptocop_prio_job, node); |
| 1868 | } else { |
| 1869 | pj = NULL; |
| 1870 | } |
| 1871 | spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags); |
| 1872 | if (pj) { |
| 1873 | assert(pj->oper != NULL); |
| 1874 | |
| 1875 | /* Notify consumer of operation completeness. */ |
| 1876 | DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); |
| 1877 | |
| 1878 | pj->oper->operation_status = 0; /* Job is completed. */ |
| 1879 | pj->oper->cb(pj->oper, pj->oper->cb_data); |
| 1880 | delete_internal_operation(pj->iop); |
| 1881 | kfree(pj); |
| 1882 | } |
| 1883 | } while (pj != NULL); |
| 1884 | |
| 1885 | DEBUG(printk("cryptocop_do_tasklet: exiting\n")); |
| 1886 | } |
| 1887 | |
| 1888 | static irqreturn_t |
| 1889 | dma_done_interrupt(int irq, void *dev_id, struct pt_regs * regs) |
| 1890 | { |
| 1891 | struct cryptocop_prio_job *done_job; |
| 1892 | reg_dma_rw_ack_intr ack_intr = { |
| 1893 | .data = 1, |
| 1894 | }; |
| 1895 | |
| 1896 | REG_WR (dma, regi_dma9, rw_ack_intr, ack_intr); |
| 1897 | |
| 1898 | DEBUG(printk("cryptocop DMA done\n")); |
| 1899 | |
| 1900 | spin_lock(&running_job_lock); |
| 1901 | if (cryptocop_running_job == NULL){ |
| 1902 | printk("stream co-processor got interrupt when not busy\n"); |
| 1903 | spin_unlock(&running_job_lock); |
| 1904 | return IRQ_HANDLED; |
| 1905 | } |
| 1906 | done_job = cryptocop_running_job; |
| 1907 | cryptocop_running_job = NULL; |
| 1908 | spin_unlock(&running_job_lock); |
| 1909 | |
| 1910 | /* Start processing a job. */ |
| 1911 | if (!spin_trylock(&cryptocop_process_lock)){ |
| 1912 | DEBUG(printk("cryptocop irq handler, not starting a job\n")); |
| 1913 | } else { |
| 1914 | cryptocop_start_job(); |
| 1915 | spin_unlock(&cryptocop_process_lock); |
| 1916 | } |
| 1917 | |
| 1918 | done_job->oper->operation_status = 0; /* Job is completed. */ |
| 1919 | if (done_job->oper->fast_callback){ |
| 1920 | /* This operation wants callback from interrupt. */ |
| 1921 | done_job->oper->cb(done_job->oper, done_job->oper->cb_data); |
| 1922 | delete_internal_operation(done_job->iop); |
| 1923 | kfree(done_job); |
| 1924 | } else { |
| 1925 | spin_lock(&cryptocop_completed_jobs_lock); |
| 1926 | list_add_tail(&(done_job->node), &cryptocop_completed_jobs); |
| 1927 | spin_unlock(&cryptocop_completed_jobs_lock); |
| 1928 | tasklet_schedule(&cryptocop_tasklet); |
| 1929 | } |
| 1930 | |
| 1931 | DEBUG(printk("cryptocop leave irq handler\n")); |
| 1932 | return IRQ_HANDLED; |
| 1933 | } |
| 1934 | |
| 1935 | |
| 1936 | /* Setup interrupts and DMA channels. */ |
| 1937 | static int init_cryptocop(void) |
| 1938 | { |
| 1939 | unsigned long flags; |
| 1940 | reg_intr_vect_rw_mask intr_mask; |
| 1941 | reg_dma_rw_cfg dma_cfg = {.en = 1}; |
| 1942 | reg_dma_rw_intr_mask intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */ |
| 1943 | reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 }; |
| 1944 | reg_strcop_rw_cfg strcop_cfg = { |
| 1945 | .ipend = regk_strcop_little, |
| 1946 | .td1 = regk_strcop_e, |
| 1947 | .td2 = regk_strcop_d, |
| 1948 | .td3 = regk_strcop_e, |
| 1949 | .ignore_sync = 0, |
| 1950 | .en = 1 |
| 1951 | }; |
| 1952 | |
| 1953 | if (request_irq(DMA9_INTR_VECT, dma_done_interrupt, 0, "stream co-processor DMA", NULL)) panic("request_irq stream co-processor irq dma9"); |
| 1954 | |
| 1955 | (void)crisv32_request_dma(8, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp); |
| 1956 | (void)crisv32_request_dma(9, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp); |
| 1957 | |
| 1958 | local_irq_save(flags); |
| 1959 | |
| 1960 | /* Reset and enable the cryptocop. */ |
| 1961 | strcop_cfg.en = 0; |
| 1962 | REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg); |
| 1963 | strcop_cfg.en = 1; |
| 1964 | REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg); |
| 1965 | |
| 1966 | /* Enable DMA9 interrupt */ |
| 1967 | intr_mask = REG_RD(intr_vect, regi_irq, rw_mask); |
| 1968 | intr_mask.dma9 = 1; |
| 1969 | REG_WR(intr_vect, regi_irq, rw_mask, intr_mask); |
| 1970 | |
| 1971 | /* Enable DMAs. */ |
| 1972 | REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */ |
| 1973 | REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */ |
| 1974 | |
| 1975 | /* Set up wordsize = 4 for DMAs. */ |
| 1976 | DMA_WR_CMD (regi_dma8, regk_dma_set_w_size4); |
| 1977 | DMA_WR_CMD (regi_dma9, regk_dma_set_w_size4); |
| 1978 | |
| 1979 | /* Enable interrupts. */ |
| 1980 | REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in); |
| 1981 | |
| 1982 | /* Clear intr ack. */ |
| 1983 | REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr); |
| 1984 | |
| 1985 | local_irq_restore(flags); |
| 1986 | |
| 1987 | return 0; |
| 1988 | } |
| 1989 | |
| 1990 | /* Free used cryptocop hw resources (interrupt and DMA channels). */ |
| 1991 | static void release_cryptocop(void) |
| 1992 | { |
| 1993 | unsigned long flags; |
| 1994 | reg_intr_vect_rw_mask intr_mask; |
| 1995 | reg_dma_rw_cfg dma_cfg = {.en = 0}; |
| 1996 | reg_dma_rw_intr_mask intr_mask_in = {0}; |
| 1997 | reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 }; |
| 1998 | |
| 1999 | local_irq_save(flags); |
| 2000 | |
| 2001 | /* Clear intr ack. */ |
| 2002 | REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr); |
| 2003 | |
| 2004 | /* Disable DMA9 interrupt */ |
| 2005 | intr_mask = REG_RD(intr_vect, regi_irq, rw_mask); |
| 2006 | intr_mask.dma9 = 0; |
| 2007 | REG_WR(intr_vect, regi_irq, rw_mask, intr_mask); |
| 2008 | |
| 2009 | /* Disable DMAs. */ |
| 2010 | REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */ |
| 2011 | REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */ |
| 2012 | |
| 2013 | /* Disable interrupts. */ |
| 2014 | REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in); |
| 2015 | |
| 2016 | local_irq_restore(flags); |
| 2017 | |
| 2018 | free_irq(DMA9_INTR_VECT, NULL); |
| 2019 | |
| 2020 | (void)crisv32_free_dma(8); |
| 2021 | (void)crisv32_free_dma(9); |
| 2022 | } |
| 2023 | |
| 2024 | |
| 2025 | /* Init job queue. */ |
| 2026 | static int cryptocop_job_queue_init(void) |
| 2027 | { |
| 2028 | int i; |
| 2029 | |
| 2030 | INIT_LIST_HEAD(&cryptocop_completed_jobs); |
| 2031 | |
| 2032 | for (i = 0; i < cryptocop_prio_no_prios; i++){ |
| 2033 | cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i; |
| 2034 | INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs); |
| 2035 | } |
| 2036 | return 0; |
| 2037 | } |
| 2038 | |
| 2039 | |
| 2040 | static void cryptocop_job_queue_close(void) |
| 2041 | { |
| 2042 | struct list_head *node, *tmp; |
| 2043 | struct cryptocop_prio_job *pj = NULL; |
| 2044 | unsigned long int process_flags, flags; |
| 2045 | int i; |
| 2046 | |
| 2047 | /* FIXME: This is as yet untested code. */ |
| 2048 | |
| 2049 | /* Stop strcop from getting an operation to process while we are closing the |
| 2050 | module. */ |
| 2051 | spin_lock_irqsave(&cryptocop_process_lock, process_flags); |
| 2052 | |
| 2053 | /* Empty the job queue. */ |
| 2054 | for (i = 0; i < cryptocop_prio_no_prios; i++){ |
| 2055 | if (!list_empty(&(cryptocop_job_queues[i].jobs))){ |
| 2056 | list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) { |
| 2057 | pj = list_entry(node, struct cryptocop_prio_job, node); |
| 2058 | list_del(node); |
| 2059 | |
| 2060 | /* Call callback to notify consumer of job removal. */ |
| 2061 | DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); |
| 2062 | pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */ |
| 2063 | pj->oper->cb(pj->oper, pj->oper->cb_data); |
| 2064 | |
| 2065 | delete_internal_operation(pj->iop); |
| 2066 | kfree(pj); |
| 2067 | } |
| 2068 | } |
| 2069 | } |
| 2070 | spin_unlock_irqrestore(&cryptocop_process_lock, process_flags); |
| 2071 | |
| 2072 | /* Remove the running job, if any. */ |
| 2073 | spin_lock_irqsave(&running_job_lock, flags); |
| 2074 | if (cryptocop_running_job){ |
| 2075 | reg_strcop_rw_cfg rw_cfg; |
| 2076 | reg_dma_rw_cfg dma_out_cfg, dma_in_cfg; |
| 2077 | |
| 2078 | /* Stop DMA. */ |
| 2079 | dma_out_cfg = REG_RD(dma, regi_dma8, rw_cfg); |
| 2080 | dma_out_cfg.en = regk_dma_no; |
| 2081 | REG_WR(dma, regi_dma8, rw_cfg, dma_out_cfg); |
| 2082 | |
| 2083 | dma_in_cfg = REG_RD(dma, regi_dma9, rw_cfg); |
| 2084 | dma_in_cfg.en = regk_dma_no; |
| 2085 | REG_WR(dma, regi_dma9, rw_cfg, dma_in_cfg); |
| 2086 | |
| 2087 | /* Disable the cryptocop. */ |
| 2088 | rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg); |
| 2089 | rw_cfg.en = 0; |
| 2090 | REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); |
| 2091 | |
| 2092 | pj = cryptocop_running_job; |
| 2093 | cryptocop_running_job = NULL; |
| 2094 | |
| 2095 | /* Call callback to notify consumer of job removal. */ |
| 2096 | DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); |
| 2097 | pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */ |
| 2098 | pj->oper->cb(pj->oper, pj->oper->cb_data); |
| 2099 | |
| 2100 | delete_internal_operation(pj->iop); |
| 2101 | kfree(pj); |
| 2102 | } |
| 2103 | spin_unlock_irqrestore(&running_job_lock, flags); |
| 2104 | |
| 2105 | /* Remove completed jobs, if any. */ |
| 2106 | spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags); |
| 2107 | |
| 2108 | list_for_each_safe(node, tmp, &cryptocop_completed_jobs) { |
| 2109 | pj = list_entry(node, struct cryptocop_prio_job, node); |
| 2110 | list_del(node); |
| 2111 | /* Call callback to notify consumer of job removal. */ |
| 2112 | DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); |
| 2113 | pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */ |
| 2114 | pj->oper->cb(pj->oper, pj->oper->cb_data); |
| 2115 | |
| 2116 | delete_internal_operation(pj->iop); |
| 2117 | kfree(pj); |
| 2118 | } |
| 2119 | spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags); |
| 2120 | } |
| 2121 | |
| 2122 | |
| 2123 | static void cryptocop_start_job(void) |
| 2124 | { |
| 2125 | int i; |
| 2126 | struct cryptocop_prio_job *pj; |
| 2127 | unsigned long int flags; |
| 2128 | unsigned long int running_job_flags; |
| 2129 | reg_strcop_rw_cfg rw_cfg = {.en = 1, .ignore_sync = 0}; |
| 2130 | |
| 2131 | DEBUG(printk("cryptocop_start_job: entering\n")); |
| 2132 | |
| 2133 | spin_lock_irqsave(&running_job_lock, running_job_flags); |
| 2134 | if (cryptocop_running_job != NULL){ |
| 2135 | /* Already running. */ |
| 2136 | DEBUG(printk("cryptocop_start_job: already running, exit\n")); |
| 2137 | spin_unlock_irqrestore(&running_job_lock, running_job_flags); |
| 2138 | return; |
| 2139 | } |
| 2140 | spin_lock_irqsave(&cryptocop_job_queue_lock, flags); |
| 2141 | |
| 2142 | /* Check the queues in priority order. */ |
| 2143 | for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++); |
| 2144 | if (i == cryptocop_prio_no_prios) { |
| 2145 | spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); |
| 2146 | spin_unlock_irqrestore(&running_job_lock, running_job_flags); |
| 2147 | DEBUG(printk("cryptocop_start_job: no jobs to run\n")); |
| 2148 | return; /* No jobs to run */ |
| 2149 | } |
| 2150 | DEBUG(printk("starting job for prio %d\n", i)); |
| 2151 | |
| 2152 | /* TODO: Do not starve lower priority jobs. Let in a lower |
| 2153 | * prio job for every N-th processed higher prio job or some |
| 2154 | * other scheduling policy. This could reasonably be |
| 2155 | * tweakable since the optimal balance would depend on the |
| 2156 | * type of load on the system. */ |
| 2157 | |
| 2158 | /* Pull the DMA lists from the job and start the DMA client. */ |
| 2159 | pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node); |
| 2160 | list_del(&pj->node); |
| 2161 | spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); |
| 2162 | cryptocop_running_job = pj; |
| 2163 | |
| 2164 | /* Set config register (3DES and CSUM modes). */ |
| 2165 | switch (pj->iop->tdes_mode){ |
| 2166 | case cryptocop_3des_eee: |
| 2167 | rw_cfg.td1 = regk_strcop_e; |
| 2168 | rw_cfg.td2 = regk_strcop_e; |
| 2169 | rw_cfg.td3 = regk_strcop_e; |
| 2170 | break; |
| 2171 | case cryptocop_3des_eed: |
| 2172 | rw_cfg.td1 = regk_strcop_e; |
| 2173 | rw_cfg.td2 = regk_strcop_e; |
| 2174 | rw_cfg.td3 = regk_strcop_d; |
| 2175 | break; |
| 2176 | case cryptocop_3des_ede: |
| 2177 | rw_cfg.td1 = regk_strcop_e; |
| 2178 | rw_cfg.td2 = regk_strcop_d; |
| 2179 | rw_cfg.td3 = regk_strcop_e; |
| 2180 | break; |
| 2181 | case cryptocop_3des_edd: |
| 2182 | rw_cfg.td1 = regk_strcop_e; |
| 2183 | rw_cfg.td2 = regk_strcop_d; |
| 2184 | rw_cfg.td3 = regk_strcop_d; |
| 2185 | break; |
| 2186 | case cryptocop_3des_dee: |
| 2187 | rw_cfg.td1 = regk_strcop_d; |
| 2188 | rw_cfg.td2 = regk_strcop_e; |
| 2189 | rw_cfg.td3 = regk_strcop_e; |
| 2190 | break; |
| 2191 | case cryptocop_3des_ded: |
| 2192 | rw_cfg.td1 = regk_strcop_d; |
| 2193 | rw_cfg.td2 = regk_strcop_e; |
| 2194 | rw_cfg.td3 = regk_strcop_d; |
| 2195 | break; |
| 2196 | case cryptocop_3des_dde: |
| 2197 | rw_cfg.td1 = regk_strcop_d; |
| 2198 | rw_cfg.td2 = regk_strcop_d; |
| 2199 | rw_cfg.td3 = regk_strcop_e; |
| 2200 | break; |
| 2201 | case cryptocop_3des_ddd: |
| 2202 | rw_cfg.td1 = regk_strcop_d; |
| 2203 | rw_cfg.td2 = regk_strcop_d; |
| 2204 | rw_cfg.td3 = regk_strcop_d; |
| 2205 | break; |
| 2206 | default: |
| 2207 | DEBUG(printk("cryptocop_start_job: bad 3DES mode\n")); |
| 2208 | } |
| 2209 | switch (pj->iop->csum_mode){ |
| 2210 | case cryptocop_csum_le: |
| 2211 | rw_cfg.ipend = regk_strcop_little; |
| 2212 | break; |
| 2213 | case cryptocop_csum_be: |
| 2214 | rw_cfg.ipend = regk_strcop_big; |
| 2215 | break; |
| 2216 | default: |
| 2217 | DEBUG(printk("cryptocop_start_job: bad checksum mode\n")); |
| 2218 | } |
| 2219 | REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); |
| 2220 | |
| 2221 | DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n" |
| 2222 | "ctx_in: 0x%p, phys: 0x%p\n" |
| 2223 | "ctx_out: 0x%p, phys: 0x%p\n", |
| 2224 | pj, |
| 2225 | &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in), |
| 2226 | &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out))); |
| 2227 | |
| 2228 | /* Start input DMA. */ |
| 2229 | DMA_START_CONTEXT(regi_dma9, virt_to_phys(&pj->iop->ctx_in)); |
| 2230 | |
| 2231 | /* Start output DMA. */ |
| 2232 | DMA_START_CONTEXT(regi_dma8, virt_to_phys(&pj->iop->ctx_out)); |
| 2233 | |
| 2234 | spin_unlock_irqrestore(&running_job_lock, running_job_flags); |
| 2235 | DEBUG(printk("cryptocop_start_job: exiting\n")); |
| 2236 | } |
| 2237 | |
| 2238 | |
| 2239 | static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation) |
| 2240 | { |
| 2241 | int err; |
| 2242 | int alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL; |
| 2243 | void *iop_alloc_ptr = NULL; |
| 2244 | |
| 2245 | *pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag); |
| 2246 | if (!*pj) return -ENOMEM; |
| 2247 | |
| 2248 | DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation)); |
| 2249 | |
| 2250 | (*pj)->oper = operation; |
| 2251 | DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n", (*pj)->oper->cb, (*pj)->oper->cb_data)); |
| 2252 | |
| 2253 | if (operation->use_dmalists) { |
| 2254 | DEBUG(print_user_dma_lists(&operation->list_op)); |
| 2255 | if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){ |
| 2256 | DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n")); |
| 2257 | kfree(*pj); |
| 2258 | return -EINVAL; |
| 2259 | } |
| 2260 | iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag); |
| 2261 | if (!iop_alloc_ptr) { |
| 2262 | DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n")); |
| 2263 | kfree(*pj); |
| 2264 | return -ENOMEM; |
| 2265 | } |
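|      | /* Place the operation struct so that its ctx_out DMA context starts on a 32 byte boundary within the padded allocation. */ |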
| 2266 | (*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out)); |
| 2267 | DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation))); |
| 2268 | (*pj)->iop->alloc_ptr = iop_alloc_ptr; |
| 2269 | (*pj)->iop->sid = operation->sid; |
| 2270 | (*pj)->iop->cdesc_out = NULL; |
| 2271 | (*pj)->iop->cdesc_in = NULL; |
| 2272 | (*pj)->iop->tdes_mode = operation->list_op.tdes_mode; |
| 2273 | (*pj)->iop->csum_mode = operation->list_op.csum_mode; |
| 2274 | (*pj)->iop->ddesc_out = operation->list_op.outlist; |
| 2275 | (*pj)->iop->ddesc_in = operation->list_op.inlist; |
| 2276 | |
| 2277 | /* Setup DMA contexts. */ |
| 2278 | (*pj)->iop->ctx_out.next = NULL; |
| 2279 | (*pj)->iop->ctx_out.eol = 1; |
| 2280 | (*pj)->iop->ctx_out.saved_data = operation->list_op.outlist; |
| 2281 | (*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf; |
| 2282 | |
| 2283 | (*pj)->iop->ctx_in.next = NULL; |
| 2284 | (*pj)->iop->ctx_in.eol = 1; |
| 2285 | (*pj)->iop->ctx_in.saved_data = operation->list_op.inlist; |
| 2286 | (*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf; |
| 2287 | } else { |
| 2288 | if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) { |
| 2289 | DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err)); |
| 2290 | kfree(*pj); |
| 2291 | return err; |
| 2292 | } |
| 2293 | } |
| 2294 | DEBUG(print_dma_descriptors((*pj)->iop)); |
| 2295 | |
| 2296 | DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n")); |
| 2297 | |
| 2298 | return 0; |
| 2299 | } |
| 2300 | |
| 2301 | |
| 2302 | static int cryptocop_open(struct inode *inode, struct file *filp) |
| 2303 | { |
| 2304 | int p = iminor(inode); |
| 2305 | |
| 2306 | if (p != CRYPTOCOP_MINOR) return -EINVAL; |
| 2307 | |
| 2308 | filp->private_data = NULL; |
| 2309 | return 0; |
| 2310 | } |
| 2311 | |
| 2312 | |
| 2313 | static int cryptocop_release(struct inode *inode, struct file *filp) |
| 2314 | { |
| 2315 | struct cryptocop_private *dev = filp->private_data; |
| 2316 | struct cryptocop_private *dev_next; |
| 2317 | |
| 2318 | while (dev){ |
| 2319 | dev_next = dev->next; |
| 2320 | if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) { |
| 2321 | (void)cryptocop_free_session(dev->sid); |
| 2322 | } |
| 2323 | kfree(dev); |
| 2324 | dev = dev_next; |
| 2325 | } |
| 2326 | |
| 2327 | return 0; |
| 2328 | } |
| 2329 | |
| 2330 | |
| 2331 | static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp, |
| 2332 | unsigned int cmd, unsigned long arg) |
| 2333 | { |
| 2334 | struct cryptocop_private *dev = filp->private_data; |
| 2335 | struct cryptocop_private *prev_dev = NULL; |
| 2336 | struct strcop_session_op *sess_op = (struct strcop_session_op *)arg; |
| 2337 | struct strcop_session_op sop; |
| 2338 | int err; |
| 2339 | |
| 2340 | DEBUG(printk("cryptocop_ioctl_close_session\n")); |
| 2341 | |
| 2342 | if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op))) |
| 2343 | return -EFAULT; |
| 2344 | err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op)); |
| 2345 | if (err) return -EFAULT; |
| 2346 | |
| 2347 | while (dev && (dev->sid != sop.ses_id)) { |
| 2348 | prev_dev = dev; |
| 2349 | dev = dev->next; |
| 2350 | } |
| 2351 | if (dev){ |
| 2352 | if (prev_dev){ |
| 2353 | prev_dev->next = dev->next; |
| 2354 | } else { |
| 2355 | filp->private_data = dev->next; |
| 2356 | } |
| 2357 | err = cryptocop_free_session(dev->sid); |
| 2358 | if (err) return -EFAULT; |
| 2359 | } else { |
| 2360 | DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id)); |
| 2361 | return -EINVAL; |
| 2362 | } |
| 2363 | return 0; |
| 2364 | } |
| 2365 | |
| 2366 | |
| 2367 | static void ioctl_process_job_callback(struct cryptocop_operation *op, void*cb_data) |
| 2368 | { |
| 2369 | struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data; |
| 2370 | |
| 2371 | DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data)); |
| 2372 | |
| 2373 | jc->processed = 1; |
| 2374 | wake_up(&cryptocop_ioc_process_wq); |
| 2375 | } |
| 2376 | |
| 2377 | |
| 2378 | #define CRYPTOCOP_IOCTL_CIPHER_TID (1) |
| 2379 | #define CRYPTOCOP_IOCTL_DIGEST_TID (2) |
| 2380 | #define CRYPTOCOP_IOCTL_CSUM_TID (3) |
| 2381 | |
| 2382 | static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op) |
| 2383 | { |
| 2384 | size_t ch_ix = 0; |
| 2385 | |
| 2386 | if (crp_op->do_cipher) ch_ix = crp_op->cipher_start; |
| 2387 | if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start; |
| 2388 | if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start; |
| 2389 | |
| 2390 | DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix)); |
| 2391 | return ch_ix; |
| 2392 | } |
| 2393 | |
| 2394 | |
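|      | /* Return the next offset after ix where an enabled transform starts or ends; if no such boundary remains, ix itself is returned. */ |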
| 2395 | static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix) |
| 2396 | { |
| 2397 | size_t ch_ix = INT_MAX; |
| 2398 | size_t tmp_ix = 0; |
| 2399 | |
| 2400 | if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){ |
| 2401 | if (crp_op->cipher_start > ix) { |
| 2402 | ch_ix = crp_op->cipher_start; |
| 2403 | } else { |
| 2404 | ch_ix = crp_op->cipher_start + crp_op->cipher_len; |
| 2405 | } |
| 2406 | } |
| 2407 | if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){ |
| 2408 | if (crp_op->digest_start > ix) { |
| 2409 | tmp_ix = crp_op->digest_start; |
| 2410 | } else { |
| 2411 | tmp_ix = crp_op->digest_start + crp_op->digest_len; |
| 2412 | } |
| 2413 | if (tmp_ix < ch_ix) ch_ix = tmp_ix; |
| 2414 | } |
| 2415 | if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){ |
| 2416 | if (crp_op->csum_start > ix) { |
| 2417 | tmp_ix = crp_op->csum_start; |
| 2418 | } else { |
| 2419 | tmp_ix = crp_op->csum_start + crp_op->csum_len; |
| 2420 | } |
| 2421 | if (tmp_ix < ch_ix) ch_ix = tmp_ix; |
| 2422 | } |
| 2423 | if (ch_ix == INT_MAX) ch_ix = ix; |
| 2424 | DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix, ch_ix)); |
| 2425 | return ch_ix; |
| 2426 | } |
| 2427 | |
| 2428 | |
| 2429 | /* Map map_length bytes from the pages starting at *pageix and *pageoffset to iovecs starting at *iovix. |
| 2430 | * Returns -1 on success and 0 on failure. */ |
| 2431 | static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length ) |
| 2432 | { |
| 2433 | int tmplen; |
| 2434 | |
| 2435 | assert(iov != NULL); |
| 2436 | assert(iovix != NULL); |
| 2437 | assert(pages != NULL); |
| 2438 | assert(pageix != NULL); |
| 2439 | assert(pageoffset != NULL); |
| 2440 | |
| 2441 | DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset)); |
| 2442 | |
| 2443 | while (map_length > 0){ |
| 2444 | DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset)); |
| 2445 | if (*iovix >= iovlen){ |
| 2446 | DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen)); |
| 2447 | return 0; |
| 2448 | } |
| 2449 | if (*pageix >= nopages){ |
| 2450 | DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages)); |
| 2451 | return 0; |
| 2452 | } |
| 2453 | iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset; |
| 2454 | tmplen = PAGE_SIZE - *pageoffset; |
| 2455 | if (tmplen < map_length){ |
| 2456 | (*pageoffset) = 0; |
| 2457 | (*pageix)++; |
| 2458 | } else { |
| 2459 | tmplen = map_length; |
| 2460 | (*pageoffset) += map_length; |
| 2461 | } |
| 2462 | DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix)); |
| 2463 | iov[*iovix].iov_len = tmplen; |
| 2464 | map_length -= tmplen; |
| 2465 | (*iovix)++; |
| 2466 | } |
| 2467 | DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix)); |
| 2468 | return -1; |
| 2469 | } |
| 2470 | |
| 2471 | |
| 2472 | |
| 2473 | static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) |
| 2474 | { |
| 2475 | int i; |
| 2476 | struct cryptocop_private *dev = filp->private_data; |
| 2477 | struct strcop_crypto_op *crp_oper = (struct strcop_crypto_op *)arg; |
| 2478 | struct strcop_crypto_op oper = {0}; |
| 2479 | int err = 0; |
| 2480 | struct cryptocop_operation *cop = NULL; |
| 2481 | |
| 2482 | struct ioctl_job_cb_ctx *jc = NULL; |
| 2483 | |
| 2484 | struct page **inpages = NULL; |
| 2485 | struct page **outpages = NULL; |
| 2486 | int noinpages = 0; |
| 2487 | int nooutpages = 0; |
| 2488 | |
| 2489 | struct cryptocop_desc descs[5]; /* Max 5 descriptors are needed; there are three transforms that |
| 2490 | * can get connected/disconnected at different places in the indata. */ |
| 2491 | struct cryptocop_desc_cfg dcfgs[5*3]; |
| 2492 | int desc_ix = 0; |
| 2493 | int dcfg_ix = 0; |
| 2494 | struct cryptocop_tfrm_cfg ciph_tcfg = {0}; |
| 2495 | struct cryptocop_tfrm_cfg digest_tcfg = {0}; |
| 2496 | struct cryptocop_tfrm_cfg csum_tcfg = {0}; |
| 2497 | |
| 2498 | unsigned char *digest_result = NULL; |
| 2499 | int digest_length = 0; |
| 2500 | int cblocklen = 0; |
| 2501 | unsigned char csum_result[CSUM_BLOCK_LENGTH]; |
| 2502 | struct cryptocop_session *sess; |
| 2503 | |
| 2504 | int iovlen = 0; |
| 2505 | int iovix = 0; |
| 2506 | int pageix = 0; |
| 2507 | int pageoffset = 0; |
| 2508 | |
| 2509 | size_t prev_ix = 0; |
| 2510 | size_t next_ix; |
| 2511 | |
| 2512 | int cipher_active, digest_active, csum_active; |
| 2513 | int end_digest, end_csum; |
| 2514 | int digest_done = 0; |
| 2515 | int cipher_done = 0; |
| 2516 | int csum_done = 0; |
| 2517 | |
| 2518 | DEBUG(printk("cryptocop_ioctl_process\n")); |
| 2519 | |
| 2520 | if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){ |
| 2521 | DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n")); |
| 2522 | return -EFAULT; |
| 2523 | } |
| 2524 | if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) { |
| 2525 | DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n")); |
| 2526 | return -EFAULT; |
| 2527 | } |
| 2528 | DEBUG(print_strcop_crypto_op(&oper)); |
| 2529 | |
| 2530 | while (dev && dev->sid != oper.ses_id) dev = dev->next; |
| 2531 | if (!dev){ |
| 2532 | DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id)); |
| 2533 | return -EINVAL; |
| 2534 | } |
| 2535 | |
| 2536 | /* Check buffers. */ |
| 2537 | if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){ |
| 2538 | DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n")); |
| 2539 | return -EINVAL; |
| 2540 | } |
| 2541 | |
| 2542 | if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){ |
| 2543 | DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n")); |
| 2544 | return -EFAULT; |
| 2545 | } |
| 2546 | if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){ |
| 2547 | DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n")); |
| 2548 | return -EFAULT; |
| 2549 | } |
| 2550 | |
| 2551 | cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL); |
| 2552 | if (!cop) { |
| 2553 | DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n")); |
| 2554 | return -ENOMEM; |
| 2555 | } |
| 2556 | jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL); |
| 2557 | if (!jc) { |
| 2558 | DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n")); |
| 2559 | err = -ENOMEM; |
| 2560 | goto error_cleanup; |
| 2561 | } |
| 2562 | jc->processed = 0; |
| 2563 | |
| 2564 | cop->cb_data = jc; |
| 2565 | cop->cb = ioctl_process_job_callback; |
| 2566 | cop->operation_status = 0; |
| 2567 | cop->use_dmalists = 0; |
| 2568 | cop->in_interrupt = 0; |
| 2569 | cop->fast_callback = 0; |
| 2570 | cop->tfrm_op.tfrm_cfg = NULL; |
| 2571 | cop->tfrm_op.desc = NULL; |
| 2572 | cop->tfrm_op.indata = NULL; |
| 2573 | cop->tfrm_op.incount = 0; |
| 2574 | cop->tfrm_op.inlen = 0; |
| 2575 | cop->tfrm_op.outdata = NULL; |
| 2576 | cop->tfrm_op.outcount = 0; |
| 2577 | cop->tfrm_op.outlen = 0; |
| 2578 | |
| 2579 | sess = get_session(oper.ses_id); |
| 2580 | if (!sess){ |
| 2581 | DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n")); |
| 2582 | kfree(cop); |
| 2583 | kfree(jc); |
| 2584 | return -EINVAL; |
| 2585 | } |
| 2586 | |
| 2587 | if (oper.do_cipher) { |
| 2588 | unsigned int cipher_outlen = 0; |
| 2589 | struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID); |
| 2590 | if (!tc) { |
| 2591 | DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n")); |
| 2592 | err = -EINVAL; |
| 2593 | goto error_cleanup; |
| 2594 | } |
| 2595 | ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID; |
| 2596 | ciph_tcfg.inject_ix = 0; |
| 2597 | ciph_tcfg.flags = 0; |
| 2598 | if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){ |
| 2599 | DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n")); |
| 2600 | kfree(cop); |
| 2601 | kfree(jc); |
| 2602 | return -EINVAL; |
| 2603 | } |
| 2604 | cblocklen = tc->init.alg == cryptocop_alg_aes ? AES_BLOCK_LENGTH : DES_BLOCK_LENGTH; |
| 2605 | if (oper.cipher_len % cblocklen) { |
| 2606 | kfree(cop); |
| 2607 | kfree(jc); |
| 2608 | DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n")); |
| 2609 | return -EINVAL; |
| 2610 | } |
| 2611 | cipher_outlen = oper.cipher_len; |
| 2612 | if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){ |
| 2613 | if (oper.cipher_explicit) { |
| 2614 | ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV; |
| 2615 | memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen); |
| 2616 | } else { |
| 2617 | cipher_outlen = oper.cipher_len - cblocklen; |
| 2618 | } |
| 2619 | } else { |
| 2620 | if (oper.cipher_explicit){ |
| 2621 | kfree(cop); |
| 2622 | kfree(jc); |
| 2623 | DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n")); |
| 2624 | return -EINVAL; |
| 2625 | } |
| 2626 | } |
| 2627 | if (oper.cipher_outlen != cipher_outlen) { |
| 2628 | kfree(cop); |
| 2629 | kfree(jc); |
| 2630 | DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen)); |
| 2631 | return -EINVAL; |
| 2632 | } |
| 2633 | |
| 2634 | if (oper.decrypt){ |
| 2635 | ciph_tcfg.flags |= CRYPTOCOP_DECRYPT; |
| 2636 | } else { |
| 2637 | ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT; |
| 2638 | } |
| 2639 | ciph_tcfg.next = cop->tfrm_op.tfrm_cfg; |
| 2640 | cop->tfrm_op.tfrm_cfg = &ciph_tcfg; |
| 2641 | } |
| 2642 | if (oper.do_digest){ |
| 2643 | struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID); |
| 2644 | if (!tc) { |
| 2645 | DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n")); |
| 2646 | err = -EINVAL; |
| 2647 | goto error_cleanup; |
| 2648 | } |
| 2649 | digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20; |
| 2650 | digest_result = kmalloc(digest_length, GFP_KERNEL); |
| 2651 | if (!digest_result) { |
| 2652 | DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n")); |
| 2653 | err = -EINVAL; |
| 2654 | goto error_cleanup; |
| 2655 | } |
| 2656 | DEBUG(memset(digest_result, 0xff, digest_length)); |
| 2657 | |
| 2658 | digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID; |
| 2659 | digest_tcfg.inject_ix = 0; |
| 2660 | ciph_tcfg.inject_ix += digest_length; |
| 2661 | if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){ |
| 2662 | DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n")); |
| 2663 | err = -EINVAL; |
| 2664 | goto error_cleanup; |
| 2665 | } |
| 2666 | |
| 2667 | digest_tcfg.next = cop->tfrm_op.tfrm_cfg; |
| 2668 | cop->tfrm_op.tfrm_cfg = &digest_tcfg; |
| 2669 | } |
| 2670 | if (oper.do_csum){ |
| 2671 | csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID; |
| 2672 | csum_tcfg.inject_ix = digest_length; |
| 2673 | ciph_tcfg.inject_ix += 2; |
| 2674 | |
| 2675 | if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){ |
| 2676 | DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n")); |
| 2677 | kfree(cop); |
| 2678 | kfree(jc); |
| 2679 | return -EINVAL; |
| 2680 | } |
| 2681 | |
| 2682 | csum_tcfg.next = cop->tfrm_op.tfrm_cfg; |
| 2683 | cop->tfrm_op.tfrm_cfg = &csum_tcfg; |
| 2684 | } |
| 2685 | |
| 2686 | prev_ix = first_cfg_change_ix(&oper); |
| 2687 | if (prev_ix > oper.inlen) { |
| 2688 | DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n")); |
| 2689 | nooutpages = noinpages = 0; |
| 2690 | err = -EINVAL; |
| 2691 | goto error_cleanup; |
| 2692 | } |
| 2693 | DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen)); |
| 2694 | |
| 2695 | /* Map user pages for in and out data of the operation. */ |
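|      | /* Number of input pages spanned by the remaining input data (from prev_ix to the end of indata). */ |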
| 2696 | noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT; |
| 2697 | DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages)); |
| 2698 | inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL); |
| 2699 | if (!inpages){ |
| 2700 | DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n")); |
| 2701 | nooutpages = noinpages = 0; |
| 2702 | err = -ENOMEM; |
| 2703 | goto error_cleanup; |
| 2704 | } |
| 2705 | if (oper.do_cipher){ |
| 2706 | nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT; |
| 2707 | DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages)); |
| 2708 | outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL); |
| 2709 | if (!outpages){ |
| 2710 | DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n")); |
| 2711 | nooutpages = noinpages = 0; |
| 2712 | err = -ENOMEM; |
| 2713 | goto error_cleanup; |
| 2714 | } |
| 2715 | } |
| 2716 | |
| 2717 | /* Acquire the mm page semaphore. */ |
| 2718 | down_read(¤t->mm->mmap_sem); |
| 2719 | |
| 2720 | err = get_user_pages(current, |
| 2721 | current->mm, |
| 2722 | (unsigned long int)(oper.indata + prev_ix), |
| 2723 | noinpages, |
| 2724 | 0, /* read access only for in data */ |
| 2725 | 0, /* no force */ |
| 2726 | inpages, |
| 2727 | NULL); |
| 2728 | |
| 2729 | if (err < 0) { |
| 2730 | up_read(¤t->mm->mmap_sem); |
| 2731 | nooutpages = noinpages = 0; |
| 2732 | DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n")); |
| 2733 | goto error_cleanup; |
| 2734 | } |
| 2735 | noinpages = err; |
| 2736 | if (oper.do_cipher){ |
| 2737 | err = get_user_pages(current, |
| 2738 | current->mm, |
| 2739 | (unsigned long int)oper.cipher_outdata, |
| 2740 | nooutpages, |
| 2741 | 1, /* write access for out data */ |
| 2742 | 0, /* no force */ |
| 2743 | outpages, |
| 2744 | NULL); |
| 2745 | up_read(¤t->mm->mmap_sem); |
| 2746 | if (err < 0) { |
| 2747 | nooutpages = 0; |
| 2748 | DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n")); |
| 2749 | goto error_cleanup; |
| 2750 | } |
| 2751 | nooutpages = err; |
| 2752 | } else { |
| 2753 | up_read(¤t->mm->mmap_sem); |
| 2754 | } |
| 2755 | |
| 2756 | /* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and |
| 2757 | * csum output and splits when units are (dis-)connected. */ |
| 2758 | cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL); |
| 2759 | cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL); |
| 2760 | if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) { |
| 2761 | DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n")); |
| 2762 | err = -ENOMEM; |
| 2763 | goto error_cleanup; |
| 2764 | } |
| 2765 | |
| 2766 | cop->tfrm_op.inlen = oper.inlen - prev_ix; |
| 2767 | cop->tfrm_op.outlen = 0; |
| 2768 | if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen; |
| 2769 | if (oper.do_digest) cop->tfrm_op.outlen += digest_length; |
| 2770 | if (oper.do_csum) cop->tfrm_op.outlen += 2; |
| 2771 | |
| 2772 | /* Setup the in iovecs. */ |
| 2773 | cop->tfrm_op.incount = noinpages; |
| 2774 | if (noinpages > 1){ |
| 2775 | size_t tmplen = cop->tfrm_op.inlen; |
| 2776 | |
| 2777 | cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK); |
| 2778 | cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK); |
| 2779 | tmplen -= cop->tfrm_op.indata[0].iov_len; |
| 2780 | for (i = 1; i<noinpages; i++){ |
| 2781 | cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE; |
| 2782 | cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]); |
| 2783 | tmplen -= PAGE_SIZE; |
| 2784 | } |
| 2785 | } else { |
| 2786 | cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix; |
| 2787 | cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK); |
| 2788 | } |
| 2789 | |
| 2790 | iovlen = nooutpages + 6; |
| 2791 | pageoffset = oper.do_cipher ? ((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0; |
| 2792 | |
| 2793 | next_ix = next_cfg_change_ix(&oper, prev_ix); |
| 2794 | if (prev_ix == next_ix){ |
| 2795 | DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n")); |
| 2796 | err = -EINVAL; /* This should be impossible barring bugs. */ |
| 2797 | goto error_cleanup; |
| 2798 | } |
| 2799 | while (prev_ix != next_ix){ |
| 2800 | end_digest = end_csum = cipher_active = digest_active = csum_active = 0; |
| 2801 | descs[desc_ix].cfg = NULL; |
| 2802 | descs[desc_ix].length = next_ix - prev_ix; |
| 2803 | |
| 2804 | if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) { |
| 2805 | dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID; |
| 2806 | dcfgs[dcfg_ix].src = cryptocop_source_dma; |
| 2807 | cipher_active = 1; |
| 2808 | |
| 2809 | if (next_ix == (oper.cipher_start + oper.cipher_len)){ |
| 2810 | cipher_done = 1; |
| 2811 | dcfgs[dcfg_ix].last = 1; |
| 2812 | } else { |
| 2813 | dcfgs[dcfg_ix].last = 0; |
| 2814 | } |
| 2815 | dcfgs[dcfg_ix].next = descs[desc_ix].cfg; |
| 2816 | descs[desc_ix].cfg = &dcfgs[dcfg_ix]; |
| 2817 | ++dcfg_ix; |
| 2818 | } |
| 2819 | if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) { |
| 2820 | digest_active = 1; |
| 2821 | dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID; |
| 2822 | dcfgs[dcfg_ix].src = cryptocop_source_dma; |
| 2823 | if (next_ix == (oper.digest_start + oper.digest_len)){ |
| 2824 | assert(!digest_done); |
| 2825 | digest_done = 1; |
| 2826 | dcfgs[dcfg_ix].last = 1; |
| 2827 | } else { |
| 2828 | dcfgs[dcfg_ix].last = 0; |
| 2829 | } |
| 2830 | dcfgs[dcfg_ix].next = descs[desc_ix].cfg; |
| 2831 | descs[desc_ix].cfg = &dcfgs[dcfg_ix]; |
| 2832 | ++dcfg_ix; |
| 2833 | } |
| 2834 | if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){ |
| 2835 | csum_active = 1; |
| 2836 | dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID; |
| 2837 | dcfgs[dcfg_ix].src = cryptocop_source_dma; |
| 2838 | if (next_ix == (oper.csum_start + oper.csum_len)){ |
| 2839 | csum_done = 1; |
| 2840 | dcfgs[dcfg_ix].last = 1; |
| 2841 | } else { |
| 2842 | dcfgs[dcfg_ix].last = 0; |
| 2843 | } |
| 2844 | dcfgs[dcfg_ix].next = descs[desc_ix].cfg; |
| 2845 | descs[desc_ix].cfg = &dcfgs[dcfg_ix]; |
| 2846 | ++dcfg_ix; |
| 2847 | } |
| 2848 | if (!descs[desc_ix].cfg){ |
| 2849 | DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix)); |
| 2850 | err = -EINVAL; |
| 2851 | goto error_cleanup; |
| 2852 | } |
| 2853 | descs[desc_ix].next = &(descs[desc_ix]) + 1; |
| 2854 | ++desc_ix; |
| 2855 | prev_ix = next_ix; |
| 2856 | next_ix = next_cfg_change_ix(&oper, prev_ix); |
| 2857 | } |
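|      | /* The loop above produces one cryptocop_desc per data segment between configuration |
|      |  * change points; each desc points at a chain of cryptocop_desc_cfg entries, one per |
|      |  * transform active in that segment, with .last set on the segment where the |
|      |  * transform ends. */ |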
| 2858 | if (desc_ix > 0){ |
| 2859 | descs[desc_ix-1].next = NULL; |
| 2860 | } else { |
| 2861 | descs[0].next = NULL; |
| 2862 | } |
| 2863 | if (oper.do_digest) { |
| 2864 | DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix)); |
| 2865 | /* Add an outdata iovec for the digest; iov_len is the length of the configured digest. */ |
| 2866 | cop->tfrm_op.outdata[iovix].iov_base = digest_result; |
| 2867 | cop->tfrm_op.outdata[iovix].iov_len = digest_length; |
| 2868 | ++iovix; |
| 2869 | } |
| 2870 | if (oper.do_csum) { |
| 2871 | /* Add outdata iovec, length == 2, the length of csum. */ |
| 2872 | DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix)); |
| 2873 | /* csum_result is a driver buffer; it is copied back to user space when the job completes. */ |
| 2874 | cop->tfrm_op.outdata[iovix].iov_base = csum_result; |
| 2875 | cop->tfrm_op.outdata[iovix].iov_len = 2; |
| 2876 | ++iovix; |
| 2877 | } |
| 2878 | if (oper.do_cipher) { |
| 2879 | if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){ |
| 2880 | DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n")); |
| 2881 | err = -ENOSYS; /* This should be impossible barring bugs. */ |
| 2882 | goto error_cleanup; |
| 2883 | } |
| 2884 | } |
| 2885 | DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix)); |
| 2886 | cop->tfrm_op.outcount = iovix; |
| 2887 | assert(iovix <= (nooutpages + 6)); |
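|      | /* The bound mirrors the allocation above: at most nooutpages iovecs for the mapped |
|      |  * cipher output plus the 6 extra entries reserved for digest, csum and split buffers. */ |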
| 2888 | |
| 2889 | cop->sid = oper.ses_id; |
| 2890 | cop->tfrm_op.desc = &descs[0]; |
| 2891 | |
| 2892 | DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data)); |
| 2893 | |
| 2894 | if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) { |
| 2895 | DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err)); |
| 2896 | err = -EINVAL; |
| 2897 | goto error_cleanup; |
| 2898 | } |
| 2899 | |
| 2900 | DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n")); |
| 2901 | |
| 2902 | wait_event(cryptocop_ioc_process_wq, (jc->processed != 0)); |
| 2903 | DEBUG(printk("cryptocop_ioctl_process: end wait for result\n")); |
| 2904 | if (!jc->processed){ |
| 2905 | printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n"); |
| 2906 | err = -EIO; |
| 2907 | goto error_cleanup; |
| 2908 | } |
| 2909 | |
| 2910 | /* Job processing done. The cipher output was DMA'd straight into the mapped user pages, so no post processing of outdata is needed. */ |
| 2911 | DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status)); |
| 2912 | if (cop->operation_status == 0){ |
| 2913 | if (oper.do_digest){ |
| 2914 | DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length)); |
| 2915 | err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length); |
| 2916 | if (0 != err){ |
| 2917 | DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err)); |
| 2918 | err = -EFAULT; |
| 2919 | goto error_cleanup; |
| 2920 | } |
| 2921 | } |
| 2922 | if (oper.do_csum){ |
| 2923 | DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n")); |
| 2924 | err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2); |
| 2925 | if (0 != err){ |
| 2926 | DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err)); |
| 2927 | err = -EFAULT; |
| 2928 | goto error_cleanup; |
| 2929 | } |
| 2930 | } |
| 2931 | err = 0; |
| 2932 | } else { |
| 2933 | DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status)); |
| 2934 | err = cop->operation_status; |
| 2935 | } |
| 2936 | |
| 2937 | error_cleanup: |
| 2938 | /* Drop the page references taken by get_user_pages(). */ |
| 2939 | for (i = 0; i < noinpages; i++){ |
| 2940 | put_page(inpages[i]); |
| 2941 | } |
| 2942 | for (i = 0; i < nooutpages; i++){ |
| 2943 | int spdl_err; |
| 2944 | /* Mark output pages dirty. */ |
| 2945 | spdl_err = set_page_dirty_lock(outpages[i]); |
Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 2946 | DEBUG(if (spdl_err < 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err)); |
Mikael Starvik | 51533b6 | 2005-07-27 11:44:44 -0700 | [diff] [blame] | 2947 | } |
| 2948 | for (i = 0; i < nooutpages; i++){ |
| 2949 | put_page(outpages[i]); |
| 2950 | } |
| 2951 | |
Jesper Juhl | b2325fe | 2005-11-07 01:01:35 -0800 | [diff] [blame] | 2952 | kfree(digest_result); |
| 2953 | kfree(inpages); |
| 2954 | kfree(outpages); |
Mikael Starvik | 51533b6 | 2005-07-27 11:44:44 -0700 | [diff] [blame] | 2955 | if (cop){ |
Jesper Juhl | b2325fe | 2005-11-07 01:01:35 -0800 | [diff] [blame] | 2956 | kfree(cop->tfrm_op.indata); |
| 2957 | kfree(cop->tfrm_op.outdata); |
Mikael Starvik | 51533b6 | 2005-07-27 11:44:44 -0700 | [diff] [blame] | 2958 | kfree(cop); |
| 2959 | } |
Jesper Juhl | b2325fe | 2005-11-07 01:01:35 -0800 | [diff] [blame] | 2960 | kfree(jc); |
Mikael Starvik | 51533b6 | 2005-07-27 11:44:44 -0700 | [diff] [blame] | 2961 | |
| 2962 | DEBUG(print_lock_status()); |
| 2963 | |
| 2964 | return err; |
| 2965 | } |
| 2966 | |
| 2967 | |
| 2968 | static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) |
| 2969 | { |
| 2970 | cryptocop_session_id sid; |
| 2971 | int err; |
| 2972 | struct cryptocop_private *dev; |
| 2973 | struct strcop_session_op *sess_op = (struct strcop_session_op *)arg; |
| 2974 | struct strcop_session_op sop; |
| 2975 | struct cryptocop_transform_init *tis = NULL; |
| 2976 | struct cryptocop_transform_init ti_cipher = {0}; |
| 2977 | struct cryptocop_transform_init ti_digest = {0}; |
| 2978 | struct cryptocop_transform_init ti_csum = {0}; |
| 2979 | |
| 2980 | if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op))) |
| 2981 | return -EFAULT; |
| 2982 | err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op)); |
| 2983 | if (err) return -EFAULT; |
| 2984 | if (sop.cipher != cryptocop_cipher_none) { |
| 2985 | if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT; |
| 2986 | } |
| 2987 | DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n")); |
| 2988 | |
| 2989 | DEBUG(printk("\tcipher:%d\n" |
| 2990 | "\tcipher_mode:%d\n" |
| 2991 | "\tdigest:%d\n" |
| 2992 | "\tcsum:%d\n", |
| 2993 | (int)sop.cipher, |
| 2994 | (int)sop.cmode, |
| 2995 | (int)sop.digest, |
| 2996 | (int)sop.csum)); |
| 2997 | |
| 2998 | if (sop.cipher != cryptocop_cipher_none){ |
| 2999 | /* Init the cipher. */ |
| 3000 | switch (sop.cipher){ |
| 3001 | case cryptocop_cipher_des: |
| 3002 | ti_cipher.alg = cryptocop_alg_des; |
| 3003 | break; |
| 3004 | case cryptocop_cipher_3des: |
| 3005 | ti_cipher.alg = cryptocop_alg_3des; |
| 3006 | break; |
| 3007 | case cryptocop_cipher_aes: |
| 3008 | ti_cipher.alg = cryptocop_alg_aes; |
| 3009 | break; |
| 3010 | default: |
| 3011 | DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher)); |
| 3012 | return -EINVAL; |
| 3013 | } |
| 3014 | DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg)); |
| 3015 | if (copy_from_user(ti_cipher.key, sop.key, sop.keylen/8)) return -EFAULT; |
| 3016 | ti_cipher.keylen = sop.keylen; |
| 3017 | switch (sop.cmode){ |
| 3018 | case cryptocop_cipher_mode_cbc: |
| 3019 | case cryptocop_cipher_mode_ecb: |
| 3020 | ti_cipher.cipher_mode = sop.cmode; |
| 3021 | break; |
| 3022 | default: |
| 3023 | DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode)); |
| 3024 | return -EINVAL; |
| 3025 | } |
| 3026 | DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher.cipher_mode)); |
| 3027 | switch (sop.des3_mode){ |
| 3028 | case cryptocop_3des_eee: |
| 3029 | case cryptocop_3des_eed: |
| 3030 | case cryptocop_3des_ede: |
| 3031 | case cryptocop_3des_edd: |
| 3032 | case cryptocop_3des_dee: |
| 3033 | case cryptocop_3des_ded: |
| 3034 | case cryptocop_3des_dde: |
| 3035 | case cryptocop_3des_ddd: |
| 3036 | ti_cipher.tdes_mode = sop.des3_mode; |
| 3037 | break; |
| 3038 | default: |
| 3039 | DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode)); |
| 3040 | return -EINVAL; |
| 3041 | } |
| 3042 | ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID; |
| 3043 | ti_cipher.next = tis; |
| 3044 | tis = &ti_cipher; |
| 3045 | } /* if (sop.cipher != cryptocop_cipher_none) */ |
| 3046 | if (sop.digest != cryptocop_digest_none){ |
| 3047 | DEBUG(printk("setting digest transform\n")); |
| 3048 | switch (sop.digest){ |
| 3049 | case cryptocop_digest_md5: |
| 3050 | ti_digest.alg = cryptocop_alg_md5; |
| 3051 | break; |
| 3052 | case cryptocop_digest_sha1: |
| 3053 | ti_digest.alg = cryptocop_alg_sha1; |
| 3054 | break; |
| 3055 | default: |
| 3056 | DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest)); |
| 3057 | return -EINVAL; |
| 3058 | } |
| 3059 | ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID; |
| 3060 | ti_digest.next = tis; |
| 3061 | tis = &ti_digest; |
| 3062 | } /* if (sop.digest != cryptocop_digest_none) */ |
| 3063 | if (sop.csum != cryptocop_csum_none){ |
| 3064 | DEBUG(printk("setting csum transform\n")); |
| 3065 | switch (sop.csum){ |
| 3066 | case cryptocop_csum_le: |
| 3067 | case cryptocop_csum_be: |
| 3068 | ti_csum.csum_mode = sop.csum; |
| 3069 | break; |
| 3070 | default: |
| 3071 | DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum)); |
| 3072 | return -EINVAL; |
| 3073 | } |
| 3074 | ti_csum.alg = cryptocop_alg_csum; |
| 3075 | ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID; |
| 3076 | ti_csum.next = tis; |
| 3077 | tis = &ti_csum; |
| 3078 | } /* (sop.csum != cryptocop_csum_none) */ |
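|      | /* tis now heads a chain of stack-allocated transform inits (csum, digest, cipher, |
|      |  * in whatever combination was requested); they only need to live until |
|      |  * cryptocop_new_session() below has consumed them. */ |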
| 3079 | dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL); |
| 3080 | if (!dev){ |
| 3081 | DEBUG_API(printk("create session, alloc dev\n")); |
| 3082 | return -ENOMEM; |
| 3083 | } |
| 3084 | |
| 3085 | err = cryptocop_new_session(&sid, tis, GFP_KERNEL); |
| 3086 | DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);}); |
| 3087 | |
| 3088 | if (err) { |
| 3089 | kfree(dev); |
| 3090 | return err; |
| 3091 | } |
| 3092 | sess_op->ses_id = sid; |
| 3093 | dev->sid = sid; |
| 3094 | dev->next = filp->private_data; |
| 3095 | filp->private_data = dev; |
| 3096 | |
| 3097 | return 0; |
| 3098 | } |
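|      | /* A minimal sketch of how user space might set up a session through this ioctl. |
|      |  * The device node name is an assumption; the field names, enum values and |
|      |  * CRYPTOCOP_IO_CREATE_SESSION come from the handler above (cryptocop.h). |
|      |  * |
|      |  *	struct strcop_session_op sop = {0}; |
|      |  *	int fd = open("/dev/cryptocop", O_RDWR);   // node name assumed |
|      |  *	sop.cipher = cryptocop_cipher_aes; |
|      |  *	sop.cmode  = cryptocop_cipher_mode_cbc; |
|      |  *	sop.keylen = 128;                          // key length in bits |
|      |  *	sop.key    = aes_key;                      // user buffer holding the key |
|      |  *	sop.digest = cryptocop_digest_sha1; |
|      |  *	sop.csum   = cryptocop_csum_none; |
|      |  *	if (ioctl(fd, CRYPTOCOP_IO_CREATE_SESSION, &sop) == 0) |
|      |  *		use_session(sop.ses_id);   // ses_id feeds later CRYPTOCOP_IO_PROCESS_OP calls |
|      |  */ |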
| 3099 | |
| 3100 | static int cryptocop_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) |
| 3101 | { |
| 3102 | int err = 0; |
| 3103 | if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) { |
| 3104 | DEBUG_API(printk("cryptocop_ioctl: wrong type\n")); |
| 3105 | return -ENOTTY; |
| 3106 | } |
| 3107 | if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){ |
| 3108 | return -ENOTTY; |
| 3109 | } |
| 3110 | /* Access check of the argument. Some commands, e.g. create session and process op, |
| 3111 |    need additional checks; those are handled in the command handling functions. */ |
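|      | /* Note the direction inversion: _IOC_READ means user space reads the result, so the |
|      |  * kernel must be allowed to write to *arg (hence VERIFY_WRITE), and vice versa for |
|      |  * _IOC_WRITE. */ |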
| 3112 | if (_IOC_DIR(cmd) & _IOC_READ) |
| 3113 | err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); |
| 3114 | else if (_IOC_DIR(cmd) & _IOC_WRITE) |
| 3115 | err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); |
| 3116 | if (err) return -EFAULT; |
| 3117 | |
| 3118 | switch (cmd) { |
| 3119 | case CRYPTOCOP_IO_CREATE_SESSION: |
| 3120 | return cryptocop_ioctl_create_session(inode, filp, cmd, arg); |
| 3121 | case CRYPTOCOP_IO_CLOSE_SESSION: |
| 3122 | return cryptocop_ioctl_close_session(inode, filp, cmd, arg); |
| 3123 | case CRYPTOCOP_IO_PROCESS_OP: |
| 3124 | return cryptocop_ioctl_process(inode, filp, cmd, arg); |
| 3125 | default: |
| 3126 | DEBUG_API(printk("cryptocop_ioctl: unknown command\n")); |
| 3127 | return -ENOTTY; |
| 3128 | } |
| 3129 | return 0; |
| 3130 | } |
| 3131 | |
| 3132 | |
| 3133 | #ifdef LDEBUG |
| 3134 | static void print_dma_descriptors(struct cryptocop_int_operation *iop) |
| 3135 | { |
| 3136 | struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out; |
| 3137 | struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in; |
| 3138 | int i; |
| 3139 | |
| 3140 | printk("print_dma_descriptors start\n"); |
| 3141 | |
| 3142 | printk("iop:\n"); |
| 3143 | printk("\tsid: %lld\n", iop->sid); |
| 3144 | |
| 3145 | printk("\tcdesc_out: 0x%p\n", iop->cdesc_out); |
| 3146 | printk("\tcdesc_in: 0x%p\n", iop->cdesc_in); |
| 3147 | printk("\tddesc_out: 0x%p\n", iop->ddesc_out); |
| 3148 | printk("\tddesc_in: 0x%p\n", iop->ddesc_in); |
| 3149 | |
| 3150 | printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out)); |
| 3151 | printk("\tnext: 0x%p\n" |
| 3152 | "\tsaved_data: 0x%p\n" |
| 3153 | "\tsaved_data_buf: 0x%p\n", |
| 3154 | iop->ctx_out.next, |
| 3155 | iop->ctx_out.saved_data, |
| 3156 | iop->ctx_out.saved_data_buf); |
| 3157 | |
| 3158 | printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in)); |
| 3159 | printk("\tnext: 0x%p\n" |
| 3160 | "\tsaved_data: 0x%p\n" |
| 3161 | "\tsaved_data_buf: 0x%p\n", |
| 3162 | iop->ctx_in.next, |
| 3163 | iop->ctx_in.saved_data, |
| 3164 | iop->ctx_in.saved_data_buf); |
| 3165 | |
| 3166 | i = 0; |
| 3167 | while (cdesc_out) { |
| 3168 | dma_descr_data *td; |
| 3169 | printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr); |
| 3170 | printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr)); |
| 3171 | td = cdesc_out->dma_descr; |
| 3172 | printk("\n\tbuf: 0x%p\n" |
| 3173 | "\tafter: 0x%p\n" |
| 3174 | "\tmd: 0x%04x\n" |
| 3175 | "\tnext: 0x%p\n", |
| 3176 | td->buf, |
| 3177 | td->after, |
| 3178 | td->md, |
| 3179 | td->next); |
| 3180 | printk("flags:\n" |
| 3181 | "\twait:\t%d\n" |
| 3182 | "\teol:\t%d\n" |
| 3183 | "\touteop:\t%d\n" |
| 3184 | "\tineop:\t%d\n" |
| 3185 | "\tintr:\t%d\n", |
| 3186 | td->wait, |
| 3187 | td->eol, |
| 3188 | td->out_eop, |
| 3189 | td->in_eop, |
| 3190 | td->intr); |
| 3191 | cdesc_out = cdesc_out->next; |
| 3192 | i++; |
| 3193 | } |
| 3194 | i = 0; |
| 3195 | while (cdesc_in) { |
| 3196 | dma_descr_data *td; |
| 3197 | printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr); |
| 3198 | printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr)); |
| 3199 | td = cdesc_in->dma_descr; |
| 3200 | printk("\n\tbuf: 0x%p\n" |
| 3201 | "\tafter: 0x%p\n" |
| 3202 | "\tmd: 0x%04x\n" |
| 3203 | "\tnext: 0x%p\n", |
| 3204 | td->buf, |
| 3205 | td->after, |
| 3206 | td->md, |
| 3207 | td->next); |
| 3208 | printk("flags:\n" |
| 3209 | "\twait:\t%d\n" |
| 3210 | "\teol:\t%d\n" |
| 3211 | "\touteop:\t%d\n" |
| 3212 | "\tineop:\t%d\n" |
| 3213 | "\tintr:\t%d\n", |
| 3214 | td->wait, |
| 3215 | td->eol, |
| 3216 | td->out_eop, |
| 3217 | td->in_eop, |
| 3218 | td->intr); |
| 3219 | cdesc_in = cdesc_in->next; |
| 3220 | i++; |
| 3221 | } |
| 3222 | |
| 3223 | printk("print_dma_descriptors end\n"); |
| 3224 | } |
| 3225 | |
| 3226 | |
| 3227 | static void print_strcop_crypto_op(struct strcop_crypto_op *cop) |
| 3228 | { |
| 3229 | printk("print_strcop_crypto_op, 0x%p\n", cop); |
| 3230 | |
| 3231 | /* Indata. */ |
| 3232 | printk("indata=0x%p\n" |
| 3233 | "inlen=%d\n" |
| 3234 | "do_cipher=%d\n" |
| 3235 | "decrypt=%d\n" |
| 3236 | "cipher_explicit=%d\n" |
| 3237 | "cipher_start=%d\n" |
| 3238 | "cipher_len=%d\n" |
| 3239 | "outdata=0x%p\n" |
| 3240 | "outlen=%d\n", |
| 3241 | cop->indata, |
| 3242 | cop->inlen, |
| 3243 | cop->do_cipher, |
| 3244 | cop->decrypt, |
| 3245 | cop->cipher_explicit, |
| 3246 | cop->cipher_start, |
| 3247 | cop->cipher_len, |
| 3248 | cop->cipher_outdata, |
| 3249 | cop->cipher_outlen); |
| 3250 | |
| 3251 | printk("do_digest=%d\n" |
| 3252 | "digest_start=%d\n" |
| 3253 | "digest_len=%d\n", |
| 3254 | cop->do_digest, |
| 3255 | cop->digest_start, |
| 3256 | cop->digest_len); |
| 3257 | |
| 3258 | printk("do_csum=%d\n" |
| 3259 | "csum_start=%d\n" |
| 3260 | "csum_len=%d\n", |
| 3261 | cop->do_csum, |
| 3262 | cop->csum_start, |
| 3263 | cop->csum_len); |
| 3264 | } |
| 3265 | |
| 3266 | static void print_cryptocop_operation(struct cryptocop_operation *cop) |
| 3267 | { |
| 3268 | struct cryptocop_desc *d; |
| 3269 | struct cryptocop_tfrm_cfg *tc; |
| 3270 | struct cryptocop_desc_cfg *dc; |
| 3271 | int i; |
| 3272 | |
| 3273 | printk("print_cryptocop_operation, cop=0x%p\n\n", cop); |
| 3274 | printk("sid: %lld\n", cop->sid); |
| 3275 | printk("operation_status=%d\n" |
| 3276 | "use_dmalists=%d\n" |
| 3277 | "in_interrupt=%d\n" |
| 3278 | "fast_callback=%d\n", |
| 3279 | cop->operation_status, |
| 3280 | cop->use_dmalists, |
| 3281 | cop->in_interrupt, |
| 3282 | cop->fast_callback); |
| 3283 | |
| 3284 | if (cop->use_dmalists){ |
| 3285 | print_user_dma_lists(&cop->list_op); |
| 3286 | } else { |
| 3287 | printk("cop->tfrm_op\n" |
| 3288 | "tfrm_cfg=0x%p\n" |
| 3289 | "desc=0x%p\n" |
| 3290 | "indata=0x%p\n" |
| 3291 | "incount=%d\n" |
| 3292 | "inlen=%d\n" |
| 3293 | "outdata=0x%p\n" |
| 3294 | "outcount=%d\n" |
| 3295 | "outlen=%d\n\n", |
| 3296 | cop->tfrm_op.tfrm_cfg, |
| 3297 | cop->tfrm_op.desc, |
| 3298 | cop->tfrm_op.indata, |
| 3299 | cop->tfrm_op.incount, |
| 3300 | cop->tfrm_op.inlen, |
| 3301 | cop->tfrm_op.outdata, |
| 3302 | cop->tfrm_op.outcount, |
| 3303 | cop->tfrm_op.outlen); |
| 3304 | |
| 3305 | tc = cop->tfrm_op.tfrm_cfg; |
| 3306 | while (tc){ |
| 3307 | printk("tfrm_cfg, 0x%p\n" |
| 3308 | "tid=%d\n" |
| 3309 | "flags=%d\n" |
| 3310 | "inject_ix=%d\n" |
| 3311 | "next=0x%p\n", |
| 3312 | tc, |
| 3313 | tc->tid, |
| 3314 | tc->flags, |
| 3315 | tc->inject_ix, |
| 3316 | tc->next); |
| 3317 | tc = tc->next; |
| 3318 | } |
| 3319 | d = cop->tfrm_op.desc; |
| 3320 | while (d){ |
| 3321 | printk("\n======================desc, 0x%p\n" |
| 3322 | "length=%d\n" |
| 3323 | "cfg=0x%p\n" |
| 3324 | "next=0x%p\n", |
| 3325 | d, |
| 3326 | d->length, |
| 3327 | d->cfg, |
| 3328 | d->next); |
| 3329 | dc = d->cfg; |
| 3330 | while (dc){ |
| 3331 | printk("=========desc_cfg, 0x%p\n" |
| 3332 | "tid=%d\n" |
| 3333 | "src=%d\n" |
| 3334 | "last=%d\n" |
| 3335 | "next=0x%p\n", |
| 3336 | dc, |
| 3337 | dc->tid, |
| 3338 | dc->src, |
| 3339 | dc->last, |
| 3340 | dc->next); |
| 3341 | dc = dc->next; |
| 3342 | } |
| 3343 | d = d->next; |
| 3344 | } |
| 3345 | printk("\n====iniov\n"); |
| 3346 | for (i = 0; i < cop->tfrm_op.incount; i++){ |
| 3347 | printk("indata[%d]\n" |
| 3348 | "base=0x%p\n" |
| 3349 | "len=%d\n", |
| 3350 | i, |
| 3351 | cop->tfrm_op.indata[i].iov_base, |
| 3352 | cop->tfrm_op.indata[i].iov_len); |
| 3353 | } |
| 3354 | printk("\n====outiov\n"); |
| 3355 | for (i = 0; i < cop->tfrm_op.outcount; i++){ |
| 3356 | printk("outdata[%d]\n" |
| 3357 | "base=0x%p\n" |
| 3358 | "len=%d\n", |
| 3359 | i, |
| 3360 | cop->tfrm_op.outdata[i].iov_base, |
| 3361 | cop->tfrm_op.outdata[i].iov_len); |
| 3362 | } |
| 3363 | } |
| 3364 | printk("------------end print_cryptocop_operation\n"); |
| 3365 | } |
| 3366 | |
| 3367 | |
| 3368 | static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op) |
| 3369 | { |
| 3370 | dma_descr_data *dd; |
| 3371 | int i; |
| 3372 | |
| 3373 | printk("print_user_dma_lists, dma_op=0x%p\n", dma_op); |
| 3374 | |
| 3375 | printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf)); |
| 3376 | printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf)); |
| 3377 | |
| 3378 | printk("##############outlist\n"); |
| 3379 | dd = phys_to_virt((unsigned long int)dma_op->outlist); |
| 3380 | i = 0; |
| 3381 | while (dd != NULL) { |
| 3382 | printk("#%d phys_to_virt(desc) 0x%p\n", i, dd); |
| 3383 | printk("\n\tbuf: 0x%p\n" |
| 3384 | "\tafter: 0x%p\n" |
| 3385 | "\tmd: 0x%04x\n" |
| 3386 | "\tnext: 0x%p\n", |
| 3387 | dd->buf, |
| 3388 | dd->after, |
| 3389 | dd->md, |
| 3390 | dd->next); |
| 3391 | printk("flags:\n" |
| 3392 | "\twait:\t%d\n" |
| 3393 | "\teol:\t%d\n" |
| 3394 | "\touteop:\t%d\n" |
| 3395 | "\tineop:\t%d\n" |
| 3396 | "\tintr:\t%d\n", |
| 3397 | dd->wait, |
| 3398 | dd->eol, |
| 3399 | dd->out_eop, |
| 3400 | dd->in_eop, |
| 3401 | dd->intr); |
| 3402 | if (dd->eol) |
| 3403 | dd = NULL; |
| 3404 | else |
| 3405 | dd = phys_to_virt((unsigned long int)dd->next); |
| 3406 | ++i; |
| 3407 | } |
| 3408 | |
| 3409 | printk("##############inlist\n"); |
| 3410 | dd = phys_to_virt((unsigned long int)dma_op->inlist); |
| 3411 | i = 0; |
| 3412 | while (dd != NULL) { |
| 3413 | printk("#%d phys_to_virt(desc) 0x%p\n", i, dd); |
| 3414 | printk("\n\tbuf: 0x%p\n" |
| 3415 | "\tafter: 0x%p\n" |
| 3416 | "\tmd: 0x%04x\n" |
| 3417 | "\tnext: 0x%p\n", |
| 3418 | dd->buf, |
| 3419 | dd->after, |
| 3420 | dd->md, |
| 3421 | dd->next); |
| 3422 | printk("flags:\n" |
| 3423 | "\twait:\t%d\n" |
| 3424 | "\teol:\t%d\n" |
| 3425 | "\touteop:\t%d\n" |
| 3426 | "\tineop:\t%d\n" |
| 3427 | "\tintr:\t%d\n", |
| 3428 | dd->wait, |
| 3429 | dd->eol, |
| 3430 | dd->out_eop, |
| 3431 | dd->in_eop, |
| 3432 | dd->intr); |
| 3433 | if (dd->eol) |
| 3434 | dd = NULL; |
| 3435 | else |
| 3436 | dd = phys_to_virt((unsigned long int)dd->next); |
| 3437 | ++i; |
| 3438 | } |
| 3439 | } |
| 3440 | |
| 3441 | |
| 3442 | static void print_lock_status(void) |
| 3443 | { |
| 3444 | printk("**********************print_lock_status\n"); |
| 3445 | printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock)); |
| 3446 | printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock)); |
| 3447 | printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock)); |
| 3448 | printk("cryptocop_sessions_lock %d\n", spin_is_locked(&cryptocop_sessions_lock)); |
| 3449 | printk("running_job_lock %d\n", spin_is_locked(&running_job_lock)); |
| 3450 | printk("cryptocop_process_lock %d\n", spin_is_locked(&cryptocop_process_lock)); |
| 3451 | } |
| 3452 | #endif /* LDEBUG */ |
| 3453 | |
| 3454 | |
| 3455 | static const char cryptocop_name[] = "ETRAX FS stream co-processor"; |
| 3456 | |
| 3457 | static int init_stream_coprocessor(void) |
| 3458 | { |
| 3459 | int err; |
| 3460 | int i; |
| 3461 | static int initialized = 0; |
| 3462 | |
| 3463 | if (initialized) |
| 3464 | return 0; |
| 3465 | |
| 3466 | initialized = 1; |
| 3467 | |
| 3468 | printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n"); |
| 3469 | |
| 3470 | err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops); |
| 3471 | if (err < 0) { |
| 3472 | printk(KERN_ERR "stream co-processor: could not get major number.\n"); |
| 3473 | return err; |
| 3474 | } |
| 3475 | |
| 3476 | err = init_cryptocop(); |
| 3477 | if (err) { |
| 3478 | (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name); |
| 3479 | return err; |
| 3480 | } |
| 3481 | err = cryptocop_job_queue_init(); |
| 3482 | if (err) { |
| 3483 | release_cryptocop(); |
| 3484 | (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name); |
| 3485 | return err; |
| 3486 | } |
| 3487 | /* Init the descriptor pool. */ |
| 3488 | for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) { |
| 3489 | descr_pool[i].from_pool = 1; |
| 3490 | descr_pool[i].next = &descr_pool[i + 1]; |
| 3491 | } |
| 3492 | descr_pool[i].from_pool = 1; |
| 3493 | descr_pool[i].next = NULL; |
| 3494 | descr_pool_free_list = &descr_pool[0]; |
| 3495 | descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE; |
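|      | /* descr_pool is now a NULL-terminated singly linked free list of |
|      |  * CRYPTOCOP_DESCRIPTOR_POOL_SIZE entries headed by descr_pool_free_list; the |
|      |  * allocation helpers elsewhere in this file presumably take and return entries |
|      |  * under descr_pool_lock. */ |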
| 3496 | |
| 3497 | spin_lock_init(&cryptocop_completed_jobs_lock); |
| 3498 | spin_lock_init(&cryptocop_job_queue_lock); |
| 3499 | spin_lock_init(&descr_pool_lock); |
| 3500 | spin_lock_init(&cryptocop_sessions_lock); |
| 3501 | spin_lock_init(&running_job_lock); |
| 3502 | spin_lock_init(&cryptocop_process_lock); |
| 3503 | |
| 3504 | cryptocop_sessions = NULL; |
| 3505 | next_sid = 1; |
| 3506 | |
| 3507 | cryptocop_running_job = NULL; |
| 3508 | |
| 3509 | printk("stream co-processor: init done.\n"); |
| 3510 | return 0; |
| 3511 | } |
| 3512 | |
| 3513 | static void __exit exit_stream_coprocessor(void) |
| 3514 | { |
| 3515 | release_cryptocop(); |
| 3516 | cryptocop_job_queue_close(); |
| 3517 | } |
| 3518 | |
| 3519 | module_init(init_stream_coprocessor); |
| 3520 | module_exit(exit_stream_coprocessor); |
| 3521 | |