1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.2
5 *
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/ioctl32.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/kobject_uevent.h>
37#include <linux/proc_fs.h>
38#include <linux/syscalls.h>
39#include <linux/version.h>
40#include "z90crypt.h"
41#include "z90common.h"
42#ifndef Z90CRYPT_USE_HOTPLUG
43#include <linux/miscdevice.h>
44#endif
45
46#define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
47#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
48# error "This kernel is too old: not supported"
49#endif
50#if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
51# error "This kernel is too recent: not supported by this file"
52#endif
53
54#define VERSION_Z90MAIN_C "$Revision: 1.57 $"
55
56static char z90main_version[] __initdata =
57 "z90main.o (" VERSION_Z90MAIN_C "/"
58 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
59
60extern char z90hardware_version[];
61
62/**
63 * Defaults that may be modified.
64 */
65
66#ifndef Z90CRYPT_USE_HOTPLUG
67/**
68 * You can specify a different minor at compile time.
69 */
70#ifndef Z90CRYPT_MINOR
71#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
72#endif
73#else
74/**
75 * You can specify a different major at compile time.
76 */
77#ifndef Z90CRYPT_MAJOR
78#define Z90CRYPT_MAJOR 0
79#endif
80#endif
81
82/**
83 * You can specify a different domain at compile time or on the insmod
84 * command line.
85 */
86#ifndef DOMAIN_INDEX
87#define DOMAIN_INDEX -1
88#endif
89
90/**
91 * This is the name under which the device is registered in /proc/modules.
92 */
93#define REG_NAME "z90crypt"
94
95/**
96 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
97 * older than CLEANUPTIME seconds in the past.
98 */
99#ifndef CLEANUPTIME
100#define CLEANUPTIME 20
101#endif
102
103/**
104 * Config should run every CONFIGTIME seconds
105 */
106#ifndef CONFIGTIME
107#define CONFIGTIME 30
108#endif
109
110/**
111 * The first execution of the config task should take place
112 * immediately after initialization
113 */
114#ifndef INITIAL_CONFIGTIME
115#define INITIAL_CONFIGTIME 1
116#endif
117
118/**
119 * Reader should run every READERTIME milliseconds
120 * With the 100Hz patch for s390, z90crypt can lock the system solid while
121 * under heavy load. We'll try to avoid that.
122 */
123#ifndef READERTIME
124#if HZ > 1000
125#define READERTIME 2
126#else
127#define READERTIME 10
128#endif
129#endif
130
131/**
132 * turn long device array index into device pointer
133 */
134#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
135
136/**
137 * turn short device array index into long device array index
138 */
139#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
140
141/**
142 * turn short device array index into device pointer
143 */
144#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
145
146/**
147 * Status for a work-element
148 */
149#define STAT_DEFAULT 0x00 // request has not been processed
150
151#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
152 // else, device is determined each write
153#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
154 // before being sent to the hardware.
155#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
156// 0x20 // UNUSED state
157#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
158#define STAT_NOWORK 0x00 // bits off: no work on any queue
159#define STAT_RDWRMASK 0x30 // mask for bits 5-4
160
161/**
162 * Macros to check the status RDWRMASK
163 */
164#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
165#define SET_RDWRMASK(statbyte, newval) \
166 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
167
168/**
169 * Audit Trail. Progress of a Work element
170 * audit[0]: Unless noted otherwise, these bits are all set by the process
171 */
172#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
173#define FP_BUFFREQ 0x40 // Low Level buffer requested
174#define FP_BUFFGOT 0x20 // Low Level buffer obtained
175#define FP_SENT 0x10 // Work element sent to a crypto device
176 // (may be set by process or by reader task)
177#define FP_PENDING 0x08 // Work element placed on pending queue
178 // (may be set by process or by reader task)
179#define FP_REQUEST 0x04 // Work element placed on request queue
180#define FP_ASLEEP 0x02 // Work element about to sleep
181#define FP_AWAKE 0x01 // Work element has been awakened
182
183/**
184 * audit[1]: These bits are set by the reader task and/or the cleanup task
185 */
186#define FP_NOTPENDING 0x80 // Work element removed from pending queue
187#define FP_AWAKENING 0x40 // Caller about to be awakened
188#define FP_TIMEDOUT 0x20 // Caller timed out
189#define FP_RESPSIZESET 0x10 // Response size copied to work element
190#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
191#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
192#define FP_REMREQUEST 0x02 // Work element removed from request queue
193#define FP_SIGNALED 0x01 // Work element was awakened by a signal
194
195/**
196 * audit[2]: unused
197 */
198
199/**
200 * state of the file handle in private_data.status
201 */
202#define STAT_OPEN 0
203#define STAT_CLOSED 1
204
205/**
206 * PID() expands to the process ID of the current process
207 */
208#define PID() (current->pid)
209
210/**
211 * Selected Constants. The number of APs and the number of devices
212 */
213#ifndef Z90CRYPT_NUM_APS
214#define Z90CRYPT_NUM_APS 64
215#endif
216#ifndef Z90CRYPT_NUM_DEVS
217#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
218#endif
219
220/**
221 * Buffer size for receiving responses. The maximum Response Size
222 * is actually the maximum request size, since in an error condition
223 * the request itself may be returned unchanged.
224 */
225#define MAX_RESPONSE_SIZE 0x0000077C
226
227/**
228 * A count and status-byte mask
229 */
230struct status {
231 int st_count; // # of enabled devices
232 int disabled_count; // # of disabled devices
233 int user_disabled_count; // # of devices disabled via proc fs
234 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
235};
236
237/**
238 * The array of device indexes is a mechanism for fast indexing into
239 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
240 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
241 * z90CDeviceIndex[2] is 47.
242 */
243struct device_x {
244 int device_index[Z90CRYPT_NUM_DEVS];
245};
246
247/**
248 * All devices are arranged in a single array: 64 APs
249 */
250struct device {
251 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
252 // PCIXCC_MCL3, CEX2C
253 enum devstat dev_stat; // current device status
254 int dev_self_x; // Index in array
255 int disabled; // Set when device is in error
256 int user_disabled; // Set when device is disabled by user
257 int dev_q_depth; // q depth
258 unsigned char * dev_resp_p; // Response buffer address
259 int dev_resp_l; // Response Buffer length
260 int dev_caller_count; // Number of callers
261 int dev_total_req_cnt; // # requests for device since load
262 struct list_head dev_caller_list; // List of callers
263};
264
265/**
266 * There's a struct status and a struct device_x for each device type.
267 */
268struct hdware_block {
269 struct status hdware_mask;
270 struct status type_mask[Z90CRYPT_NUM_TYPES];
271 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
272 unsigned char device_type_array[Z90CRYPT_NUM_APS];
273};
274
275/**
276 * z90crypt is the topmost data structure in the hierarchy.
277 */
278struct z90crypt {
279 int max_count; // Nr of possible crypto devices
280 struct status mask;
281 int q_depth_array[Z90CRYPT_NUM_DEVS];
282 int dev_type_array[Z90CRYPT_NUM_DEVS];
283 struct device_x overall_device_x; // array device indexes
284 struct device * device_p[Z90CRYPT_NUM_DEVS];
285 int terminating;
286 int domain_established;// TRUE: domain has been found
287 int cdx; // Crypto Domain Index
288 int len; // Length of this data structure
289 struct hdware_block *hdware_info;
290};
291
292/**
293 * An array of these structures is pointed to from dev_caller
294 * The length of the array depends on the device type. For APs,
295 * there are 8.
296 *
297 * The caller buffer is allocated to the user at OPEN. At WRITE,
298 * it contains the request; at READ, the response. The function
299 * send_to_crypto_device converts the request to device-dependent
300 * form and uses the caller's OPEN-allocated buffer for the response.
301 */
302struct caller {
303 int caller_buf_l; // length of original request
304 unsigned char * caller_buf_p; // Original request on WRITE
305 int caller_dev_dep_req_l; // len device dependent request
306 unsigned char * caller_dev_dep_req_p; // Device dependent form
307 unsigned char caller_id[8]; // caller-supplied message id
308 struct list_head caller_liste;
309 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
310};
311
312/**
313 * Function prototypes from z90hardware.c
314 */
315enum hdstat query_online(int, int, int, int *, int *);
316enum devstat reset_device(int, int, int);
317enum devstat send_to_AP(int, int, int, unsigned char *);
318enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
319int convert_request(unsigned char *, int, short, int, int, int *,
320 unsigned char *);
321int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
322
323/**
324 * Low level function prototypes
325 */
326static int create_z90crypt(int *);
327static int refresh_z90crypt(int *);
328static int find_crypto_devices(struct status *);
329static int create_crypto_device(int);
330static int destroy_crypto_device(int);
331static void destroy_z90crypt(void);
332static int refresh_index_array(struct status *, struct device_x *);
333static int probe_device_type(struct device *);
334static int probe_PCIXCC_type(struct device *);
335
336/**
337 * proc fs definitions
338 */
339static struct proc_dir_entry *z90crypt_entry;
340
341/**
342 * data structures
343 */
344
345/**
346 * work_element.opener points back to this structure
347 */
348struct priv_data {
349 pid_t opener_pid;
350 unsigned char status; // 0: open 1: closed
351};
352
353/**
354 * A work element is allocated for each request
355 */
356struct work_element {
357 struct priv_data *priv_data;
358 pid_t pid;
359 int devindex; // index of device processing this w_e
360 // (If request did not specify device,
361 // -1 until placed onto a queue)
362 int devtype;
363 struct list_head liste; // used for requestq and pendingq
364 char buffer[128]; // local copy of user request
365 int buff_size; // size of the buffer for the request
366 char resp_buff[RESPBUFFSIZE];
367 int resp_buff_size;
368 char __user * resp_addr; // address of response in user space
369 unsigned int funccode; // function code of request
370 wait_queue_head_t waitq;
371 unsigned long requestsent; // time at which the request was sent
372 atomic_t alarmrung; // wake-up signal
373 unsigned char caller_id[8]; // pid + counter, for this w_e
374 unsigned char status[1]; // bits to mark status of the request
375 unsigned char audit[3]; // record of work element's progress
376 unsigned char * requestptr; // address of request buffer
377 int retcode; // return code of request
378};
379
380/**
381 * High level function prototypes
382 */
383static int z90crypt_open(struct inode *, struct file *);
384static int z90crypt_release(struct inode *, struct file *);
385static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
386static ssize_t z90crypt_write(struct file *, const char __user *,
387 size_t, loff_t *);
388static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
389static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
390
391static void z90crypt_reader_task(unsigned long);
392static void z90crypt_schedule_reader_task(unsigned long);
393static void z90crypt_config_task(unsigned long);
394static void z90crypt_cleanup_task(unsigned long);
395
396static int z90crypt_status(char *, char **, off_t, int, int *, void *);
397static int z90crypt_status_write(struct file *, const char __user *,
398 unsigned long, void *);
399
400/**
401 * Hotplug support
402 */
403
404#ifdef Z90CRYPT_USE_HOTPLUG
405#define Z90CRYPT_HOTPLUG_ADD 1
406#define Z90CRYPT_HOTPLUG_REMOVE 2
407
408static void z90crypt_hotplug_event(int, int, int);
409#endif
410
411/**
412 * Storage allocated at initialization and used throughout the life of
413 * this insmod
414 */
415#ifdef Z90CRYPT_USE_HOTPLUG
416static int z90crypt_major = Z90CRYPT_MAJOR;
417#endif
418
419static int domain = DOMAIN_INDEX;
420static struct z90crypt z90crypt;
421static int quiesce_z90crypt;
422static spinlock_t queuespinlock;
423static struct list_head request_list;
424static int requestq_count;
425static struct list_head pending_list;
426static int pendingq_count;
427
428static struct tasklet_struct reader_tasklet;
429static struct timer_list reader_timer;
430static struct timer_list config_timer;
431static struct timer_list cleanup_timer;
432static atomic_t total_open;
433static atomic_t z90crypt_step;
434
435static struct file_operations z90crypt_fops = {
436 .owner = THIS_MODULE,
437 .read = z90crypt_read,
438 .write = z90crypt_write,
439 .unlocked_ioctl = z90crypt_unlocked_ioctl,
440#ifdef CONFIG_COMPAT
441 .compat_ioctl = z90crypt_compat_ioctl,
442#endif
443 .open = z90crypt_open,
444 .release = z90crypt_release
445};
446
447#ifndef Z90CRYPT_USE_HOTPLUG
448static struct miscdevice z90crypt_misc_device = {
449 .minor = Z90CRYPT_MINOR,
450 .name = DEV_NAME,
451 .fops = &z90crypt_fops,
452 .devfs_name = DEV_NAME
453};
454#endif
455
456/**
457 * Documentation values.
458 */
459MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
460 "and Jochen Roehrig");
461MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
462 "Copyright 2001, 2004 IBM Corporation");
463MODULE_LICENSE("GPL");
464module_param(domain, int, 0);
465MODULE_PARM_DESC(domain, "domain index for device");
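/**
 * Example usage (assuming the module file is named z90crypt.ko):
 *   insmod z90crypt.ko domain=1
 */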
466
467#ifdef CONFIG_COMPAT
468/**
469 * ioctl32 conversion routines
470 */
471struct ica_rsa_modexpo_32 { // For 32-bit callers
472 compat_uptr_t inputdata;
473 unsigned int inputdatalength;
474 compat_uptr_t outputdata;
475 unsigned int outputdatalength;
476 compat_uptr_t b_key;
477 compat_uptr_t n_modulus;
478};
479
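/**
 * Copy a 32-bit ica_rsa_modexpo request into a 64-bit version built in
 * compat user space, forward it to z90crypt_unlocked_ioctl, and copy the
 * resulting outputdatalength back to the 32-bit caller.
 */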
480static long
481trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
482{
483 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
484 struct ica_rsa_modexpo_32 mex32k;
485 struct ica_rsa_modexpo __user *mex64;
486 long ret = 0;
487 unsigned int i;
488
489 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
490 return -EFAULT;
491 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
492 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
493 return -EFAULT;
494 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
495 return -EFAULT;
496 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
497 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
498 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
499 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
500 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
501 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
502 return -EFAULT;
503 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
504 if (!ret)
505 if (__get_user(i, &mex64->outputdatalength) ||
506 __put_user(i, &mex32u->outputdatalength))
507 ret = -EFAULT;
508 return ret;
509}
510
511struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
512 compat_uptr_t inputdata;
513 unsigned int inputdatalength;
514 compat_uptr_t outputdata;
515 unsigned int outputdatalength;
516 compat_uptr_t bp_key;
517 compat_uptr_t bq_key;
518 compat_uptr_t np_prime;
519 compat_uptr_t nq_prime;
520 compat_uptr_t u_mult_inv;
521};
522
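/**
 * Same conversion as trans_modexpo32, but for the CRT form of the RSA
 * request (ica_rsa_modexpo_crt).
 */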
523static long
524trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
525{
526 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
527 struct ica_rsa_modexpo_crt_32 crt32k;
528 struct ica_rsa_modexpo_crt __user *crt64;
529 long ret = 0;
530 unsigned int i;
531
532 if (!access_ok(VERIFY_WRITE, crt32u,
533 sizeof(struct ica_rsa_modexpo_crt_32)))
534 return -EFAULT;
535 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
536 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
537 return -EFAULT;
538 if (copy_from_user(&crt32k, crt32u,
539 sizeof(struct ica_rsa_modexpo_crt_32)))
540 return -EFAULT;
541 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
542 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
543 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
544 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
545 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
546 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
547 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
548 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
549 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
550 return -EFAULT;
551 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
552 if (!ret)
553 if (__get_user(i, &crt64->outputdatalength) ||
554 __put_user(i, &crt32u->outputdatalength))
555 ret = -EFAULT;
556 return ret;
557}
558
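/**
 * compat ioctl entry point: status and quiesce ioctls are passed through
 * unchanged; the two RSA ioctls go through the 32->64-bit translators
 * above; anything else is -ENOIOCTLCMD.
 */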
559static long
560z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
561{
562 switch (cmd) {
563 case ICAZ90STATUS:
564 case Z90QUIESCE:
565 case Z90STAT_TOTALCOUNT:
566 case Z90STAT_PCICACOUNT:
567 case Z90STAT_PCICCCOUNT:
568 case Z90STAT_PCIXCCCOUNT:
569 case Z90STAT_PCIXCCMCL2COUNT:
570 case Z90STAT_PCIXCCMCL3COUNT:
571 case Z90STAT_CEX2CCOUNT:
572 case Z90STAT_REQUESTQ_COUNT:
573 case Z90STAT_PENDINGQ_COUNT:
574 case Z90STAT_TOTALOPEN_COUNT:
575 case Z90STAT_DOMAIN_INDEX:
576 case Z90STAT_STATUS_MASK:
577 case Z90STAT_QDEPTH_MASK:
578 case Z90STAT_PERDEV_REQCNT:
579 return z90crypt_unlocked_ioctl(filp, cmd, arg);
580 case ICARSAMODEXPO:
581 return trans_modexpo32(filp, cmd, arg);
582 case ICARSACRT:
583 return trans_modexpo_crt32(filp, cmd, arg);
584 default:
585 return -ENOIOCTLCMD;
586 }
587}
588#endif
589
590/**
591 * The module initialization code.
592 */
593static int __init
594z90crypt_init_module(void)
595{
596 int result, nresult;
597 struct proc_dir_entry *entry;
598
599 PDEBUG("PID %d\n", PID());
600
601 if ((domain < -1) || (domain > 15)) {
602 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
603 return -EINVAL;
604 }
605
606#ifndef Z90CRYPT_USE_HOTPLUG
607 /* Register as misc device with given minor (or get a dynamic one). */
608 result = misc_register(&z90crypt_misc_device);
609 if (result < 0) {
610 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
611 z90crypt_misc_device.minor, result);
612 return result;
613 }
614#else
615 /* Register the major (or get a dynamic one). */
616 result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
617 if (result < 0) {
618 PRINTKW("register_chrdev (major %d) failed with %d.\n",
619 z90crypt_major, result);
620 return result;
621 }
622
623 if (z90crypt_major == 0)
624 z90crypt_major = result;
625#endif
626
627 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
628
629 result = create_z90crypt(&domain);
630 if (result != 0) {
631 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
632 domain, result);
633 result = -ENOMEM;
634 goto init_module_cleanup;
635 }
636
637 if (result == 0) {
638 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
639 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
640 __DATE__, __TIME__);
641 PRINTKN("%s\n", z90main_version);
642 PRINTKN("%s\n", z90hardware_version);
643 PDEBUG("create_z90crypt (domain index %d) successful.\n",
644 domain);
645 } else
646 PRINTK("No devices at startup\n");
647
648#ifdef Z90CRYPT_USE_HOTPLUG
649 /* generate hotplug event for device node generation */
650 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
651#endif
652
653 /* Initialize globals. */
654 spin_lock_init(&queuespinlock);
655
656 INIT_LIST_HEAD(&pending_list);
657 pendingq_count = 0;
658
659 INIT_LIST_HEAD(&request_list);
660 requestq_count = 0;
661
662 quiesce_z90crypt = 0;
663
664 atomic_set(&total_open, 0);
665 atomic_set(&z90crypt_step, 0);
666
667 /* Set up the cleanup task. */
668 init_timer(&cleanup_timer);
669 cleanup_timer.function = z90crypt_cleanup_task;
670 cleanup_timer.data = 0;
671 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
672 add_timer(&cleanup_timer);
673
674 /* Set up the proc file system */
675 entry = create_proc_entry("driver/z90crypt", 0644, 0);
676 if (entry) {
677 entry->nlink = 1;
678 entry->data = 0;
679 entry->read_proc = z90crypt_status;
680 entry->write_proc = z90crypt_status_write;
681 }
682 else
683 PRINTK("Couldn't create z90crypt proc entry\n");
684 z90crypt_entry = entry;
685
686 /* Set up the configuration task. */
687 init_timer(&config_timer);
688 config_timer.function = z90crypt_config_task;
689 config_timer.data = 0;
690 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
691 add_timer(&config_timer);
692
693 /* Set up the reader task */
694 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
695 init_timer(&reader_timer);
696 reader_timer.function = z90crypt_schedule_reader_task;
697 reader_timer.data = 0;
698 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
699 add_timer(&reader_timer);
700
701 return 0; // success
702
703init_module_cleanup:
704#ifndef Z90CRYPT_USE_HOTPLUG
705 if ((nresult = misc_deregister(&z90crypt_misc_device)))
706 PRINTK("misc_deregister failed with %d.\n", nresult);
707 else
708 PDEBUG("misc_deregister successful.\n");
709#else
710 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
711 PRINTK("unregister_chrdev failed with %d.\n", nresult);
712 else
713 PDEBUG("unregister_chrdev successful.\n");
714#endif
715
716 return result; // failure
717}
718
719/**
720 * The module termination code
721 */
722static void __exit
723z90crypt_cleanup_module(void)
724{
725 int nresult;
726
727 PDEBUG("PID %d\n", PID());
728
729 remove_proc_entry("driver/z90crypt", 0);
730
731#ifndef Z90CRYPT_USE_HOTPLUG
732 if ((nresult = misc_deregister(&z90crypt_misc_device)))
733 PRINTK("misc_deregister failed with %d.\n", nresult);
734 else
735 PDEBUG("misc_deregister successful.\n");
736#else
737 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
738
739 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
740 PRINTK("unregister_chrdev failed with %d.\n", nresult);
741 else
742 PDEBUG("unregister_chrdev successful.\n");
743#endif
744
745 /* Remove the tasks */
746 tasklet_kill(&reader_tasklet);
747 del_timer(&reader_timer);
748 del_timer(&config_timer);
749 del_timer(&cleanup_timer);
750
751 destroy_z90crypt();
752
753 PRINTKN("Unloaded.\n");
754}
755
756/**
757 * Functions running under a process id
758 *
759 * The I/O functions:
760 * z90crypt_open
761 * z90crypt_release
762 * z90crypt_read
763 * z90crypt_write
764 * z90crypt_unlocked_ioctl
765 * z90crypt_status
766 * z90crypt_status_write
767 * disable_card
768 * enable_card
769 * scan_char
770 * scan_string
771 *
772 * Helper functions:
773 * z90crypt_rsa
774 * z90crypt_prepare
775 * z90crypt_send
776 * z90crypt_process_results
777 *
778 */
779static int
780z90crypt_open(struct inode *inode, struct file *filp)
781{
782 struct priv_data *private_data_p;
783
784 if (quiesce_z90crypt)
785 return -EQUIESCE;
786
787 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
788 if (!private_data_p) {
789 PRINTK("Memory allocate failed\n");
790 return -ENOMEM;
791 }
792
793 memset((void *)private_data_p, 0, sizeof(struct priv_data));
794 private_data_p->status = STAT_OPEN;
795 private_data_p->opener_pid = PID();
796 filp->private_data = private_data_p;
797 atomic_inc(&total_open);
798
799 return 0;
800}
801
802static int
803z90crypt_release(struct inode *inode, struct file *filp)
804{
805 struct priv_data *private_data_p = filp->private_data;
806
807 PDEBUG("PID %d (filp %p)\n", PID(), filp);
808
809 private_data_p->status = STAT_CLOSED;
810 memset(private_data_p, 0, sizeof(struct priv_data));
811 kfree(private_data_p);
812 atomic_dec(&total_open);
813
814 return 0;
815}
816
817/*
818 * there are two read functions, of which compile options will choose one
819 * without USE_GET_RANDOM_BYTES
820 * => read() always returns -EPERM;
821 * otherwise
822 * => read() uses get_random_bytes() kernel function
823 */
824#ifndef USE_GET_RANDOM_BYTES
825/**
826 * z90crypt_read will not be supported beyond z90crypt 1.3.1
827 */
828static ssize_t
829z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
830{
831 PDEBUG("filp %p (PID %d)\n", filp, PID());
832 return -EPERM;
833}
834#else // we want to use get_random_bytes
835/**
836 * read() just returns a string of random bytes. Since we have no way
837 * to generate these cryptographically, we just execute get_random_bytes
838 * for the length specified.
839 */
840#include <linux/random.h>
841static ssize_t
842z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
843{
844 unsigned char *temp_buff;
845
846 PDEBUG("filp %p (PID %d)\n", filp, PID());
847
848 if (quiesce_z90crypt)
849 return -EQUIESCE;
850 if (count < 0) {
851 PRINTK("Requested random byte count negative: %ld\n", count);
852 return -EINVAL;
853 }
854 if (count > RESPBUFFSIZE) {
855 PDEBUG("count[%d] > RESPBUFFSIZE", count);
856 return -EINVAL;
857 }
858 if (count == 0)
859 return 0;
860 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
861 if (!temp_buff) {
862 PRINTK("Memory allocate failed\n");
863 return -ENOMEM;
864 }
865 get_random_bytes(temp_buff, count);
866
867 if (copy_to_user(buf, temp_buff, count) != 0) {
868 kfree(temp_buff);
869 return -EFAULT;
870 }
871 kfree(temp_buff);
872 return count;
873}
874#endif
875
876/**
877 * Write is not allowed
878 */
879static ssize_t
880z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
881{
882 PDEBUG("filp %p (PID %d)\n", filp, PID());
883 return -EPERM;
884}
885
886/**
887 * New status functions
888 */
889static inline int
890get_status_totalcount(void)
891{
892 return z90crypt.hdware_info->hdware_mask.st_count;
893}
894
895static inline int
896get_status_PCICAcount(void)
897{
898 return z90crypt.hdware_info->type_mask[PCICA].st_count;
899}
900
901static inline int
902get_status_PCICCcount(void)
903{
904 return z90crypt.hdware_info->type_mask[PCICC].st_count;
905}
906
907static inline int
908get_status_PCIXCCcount(void)
909{
910 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
911 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
912}
913
914static inline int
915get_status_PCIXCCMCL2count(void)
916{
917 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
918}
919
920static inline int
921get_status_PCIXCCMCL3count(void)
922{
923 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
924}
925
926static inline int
927get_status_CEX2Ccount(void)
928{
929 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
930}
931
932static inline int
933get_status_requestq_count(void)
934{
935 return requestq_count;
936}
937
938static inline int
939get_status_pendingq_count(void)
940{
941 return pendingq_count;
942}
943
944static inline int
945get_status_totalopen_count(void)
946{
947 return atomic_read(&total_open);
948}
949
950static inline int
951get_status_domain_index(void)
952{
953 return z90crypt.cdx;
954}
955
956static inline unsigned char *
957get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
958{
959 int i, ix;
960
961 memcpy(status, z90crypt.hdware_info->device_type_array,
962 Z90CRYPT_NUM_APS);
963
964 for (i = 0; i < get_status_totalcount(); i++) {
965 ix = SHRT2LONG(i);
966 if (LONG2DEVPTR(ix)->user_disabled)
967 status[ix] = 0x0d;
968 }
969
970 return status;
971}
972
973static inline unsigned char *
974get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
975{
976 int i, ix;
977
978 memset(qdepth, 0, Z90CRYPT_NUM_APS);
979
980 for (i = 0; i < get_status_totalcount(); i++) {
981 ix = SHRT2LONG(i);
982 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
983 }
984
985 return qdepth;
986}
987
988static inline unsigned int *
989get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
990{
991 int i, ix;
992
993 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
994
995 for (i = 0; i < get_status_totalcount(); i++) {
996 ix = SHRT2LONG(i);
997 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
998 }
999
1000 return reqcnt;
1001}
1002
1003static inline void
1004init_work_element(struct work_element *we_p,
1005 struct priv_data *priv_data, pid_t pid)
1006{
1007 int step;
1008
1009 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
1010 /* Come up with a unique id for this caller. */
1011 step = atomic_inc_return(&z90crypt_step);
1012 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
1013 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
1014 we_p->pid = pid;
1015 we_p->priv_data = priv_data;
1016 we_p->status[0] = STAT_DEFAULT;
1017 we_p->audit[0] = 0x00;
1018 we_p->audit[1] = 0x00;
1019 we_p->audit[2] = 0x00;
1020 we_p->resp_buff_size = 0;
1021 we_p->retcode = 0;
1022 we_p->devindex = -1;
1023 we_p->devtype = -1;
1024 atomic_set(&we_p->alarmrung, 0);
1025 init_waitqueue_head(&we_p->waitq);
1026 INIT_LIST_HEAD(&(we_p->liste));
1027}
1028
1029static inline int
1030allocate_work_element(struct work_element **we_pp,
1031 struct priv_data *priv_data_p, pid_t pid)
1032{
1033 struct work_element *we_p;
1034
1035 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
1036 if (!we_p)
1037 return -ENOMEM;
1038 init_work_element(we_p, priv_data_p, pid);
1039 *we_pp = we_p;
1040 return 0;
1041}
1042
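/**
 * Mark a device disabled after an error and account for it in the
 * disabled counts of its type mask and of the overall hardware mask.
 */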
1043static inline void
1044remove_device(struct device *device_p)
1045{
1046 if (!device_p || (device_p->disabled != 0))
1047 return;
1048 device_p->disabled = 1;
1049 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
1050 z90crypt.hdware_info->hdware_mask.disabled_count++;
1051}
1052
1053/**
1054 * Bitlength limits for each card
1055 *
1056 * There are new MCLs which allow more bitlengths. See the table for details.
1057 * The MCL must be applied and the newer bitlengths enabled for these to work.
1058 *
1059 * Card Type Old limit New limit
1060 * PCICC 512-1024 512-2048
1061 * PCIXCC_MCL2 512-2048 no change (applying this MCL == card is MCL3+)
1062 * PCIXCC_MCL3 512-2048 128-2048
1063 * CEX2C 512-2048 128-2048
1064 *
1065 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
1066 * MCL to just one card in a machine. We assume, at first, that all cards have
1067 * these capabilities.
1068 */
1069int ext_bitlens = 1; // This is global
1070#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1071#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1072#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1073#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1074#define MAX_MOD_SIZE 256 // 2048 bits
1075
1076static inline int
1077select_device_type(int *dev_type_p, int bytelength)
1078{
1079 static int count = 0;
1080 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
1081 struct status *stat;
1082 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1083 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1084 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
1085 return -1;
1086 if (*dev_type_p != ANYDEV) {
1087 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1088 if (stat->st_count >
1089 (stat->disabled_count + stat->user_disabled_count))
1090 return 0;
1091 return -1;
1092 }
1093
1094 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
1095 stat = &z90crypt.hdware_info->type_mask[PCICA];
1096 PCICA_avail = stat->st_count -
1097 (stat->disabled_count + stat->user_disabled_count);
1098 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1099 PCIXCC_MCL3_avail = stat->st_count -
1100 (stat->disabled_count + stat->user_disabled_count);
1101 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1102 CEX2C_avail = stat->st_count -
1103 (stat->disabled_count + stat->user_disabled_count);
1104 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
1105 /**
1106 * bitlength is a factor, PCICA is the most capable, even with
1107 * the new MCL.
1108 */
1109 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1110 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1111 if (!PCICA_avail)
1112 return -1;
1113 else {
1114 *dev_type_p = PCICA;
1115 return 0;
1116 }
1117 }
1118
1119 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1120 CEX2C_avail);
1121 if (index_to_use < PCICA_avail)
1122 *dev_type_p = PCICA;
1123 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1124 *dev_type_p = PCIXCC_MCL3;
1125 else
1126 *dev_type_p = CEX2C;
1127 count++;
1128 return 0;
1129 }
1130
1131 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1132 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1133 return -1;
1134 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1135 if (stat->st_count >
1136 (stat->disabled_count + stat->user_disabled_count)) {
1137 *dev_type_p = PCIXCC_MCL2;
1138 return 0;
1139 }
1140
1141 /**
1142 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1143 * (if we don't have the MCL applied and the newer bitlengths enabled)
1144 * cannot go to a PCICC
1145 */
1146 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1147 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1148 return -1;
1149 }
1150 stat = &z90crypt.hdware_info->type_mask[PCICC];
1151 if (stat->st_count >
1152 (stat->disabled_count + stat->user_disabled_count)) {
1153 *dev_type_p = PCICC;
1154 return 0;
1155 }
1156
1157 return -1;
1158}
1159
1160/**
1161 * Try the selected number, then the selected type (can be ANYDEV)
1162 */
1163static inline int
1164select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1165{
1166 int i, indx, devTp, low_count, low_indx;
1167 struct device_x *index_p;
1168 struct device *dev_ptr;
1169
1170 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1171 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1172 PDEBUG("trying index = %d\n", *device_nr_p);
1173 dev_ptr = z90crypt.device_p[*device_nr_p];
1174
1175 if (dev_ptr &&
1176 (dev_ptr->dev_stat != DEV_GONE) &&
1177 (dev_ptr->disabled == 0) &&
1178 (dev_ptr->user_disabled == 0)) {
1179 PDEBUG("selected by number, index = %d\n",
1180 *device_nr_p);
1181 *dev_type_p = dev_ptr->dev_type;
1182 return *device_nr_p;
1183 }
1184 }
1185 *device_nr_p = -1;
1186 PDEBUG("trying type = %d\n", *dev_type_p);
1187 devTp = *dev_type_p;
1188 if (select_device_type(&devTp, bytelength) == -1) {
1189 PDEBUG("failed to select by type\n");
1190 return -1;
1191 }
1192 PDEBUG("selected type = %d\n", devTp);
1193 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1194 low_count = 0x0000FFFF;
1195 low_indx = -1;
1196 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1197 indx = index_p->device_index[i];
1198 dev_ptr = z90crypt.device_p[indx];
1199 if (dev_ptr &&
1200 (dev_ptr->dev_stat != DEV_GONE) &&
1201 (dev_ptr->disabled == 0) &&
1202 (dev_ptr->user_disabled == 0) &&
1203 (devTp == dev_ptr->dev_type) &&
1204 (low_count > dev_ptr->dev_caller_count)) {
1205 low_count = dev_ptr->dev_caller_count;
1206 low_indx = indx;
1207 }
1208 }
1209 *device_nr_p = low_indx;
1210 return low_indx;
1211}
1212
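/**
 * Select a device for this work element and hand its device-dependent
 * request to send_to_AP, translating the resulting device status into a
 * SEN_* return code. On success the caller is queued on the device's
 * caller list.
 */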
1213static inline int
1214send_to_crypto_device(struct work_element *we_p)
1215{
1216 struct caller *caller_p;
1217 struct device *device_p;
1218 int dev_nr;
1219 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1220
1221 if (!we_p->requestptr)
1222 return SEN_FATAL_ERROR;
1223 caller_p = (struct caller *)we_p->requestptr;
1224 dev_nr = we_p->devindex;
1225 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1226 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1227 return SEN_RETRY;
1228 else
1229 return SEN_NOT_AVAIL;
1230 }
1231 we_p->devindex = dev_nr;
1232 device_p = z90crypt.device_p[dev_nr];
1233 if (!device_p)
1234 return SEN_NOT_AVAIL;
1235 if (device_p->dev_type != we_p->devtype)
1236 return SEN_RETRY;
1237 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1238 return SEN_QUEUE_FULL;
1239 PDEBUG("device number prior to send: %d\n", dev_nr);
1240 switch (send_to_AP(dev_nr, z90crypt.cdx,
1241 caller_p->caller_dev_dep_req_l,
1242 caller_p->caller_dev_dep_req_p)) {
1243 case DEV_SEN_EXCEPTION:
1244 PRINTKC("Exception during send to device %d\n", dev_nr);
1245 z90crypt.terminating = 1;
1246 return SEN_FATAL_ERROR;
1247 case DEV_GONE:
1248 PRINTK("Device %d not available\n", dev_nr);
1249 remove_device(device_p);
1250 return SEN_NOT_AVAIL;
1251 case DEV_EMPTY:
1252 return SEN_NOT_AVAIL;
1253 case DEV_NO_WORK:
1254 return SEN_FATAL_ERROR;
1255 case DEV_BAD_MESSAGE:
1256 return SEN_USER_ERROR;
1257 case DEV_QUEUE_FULL:
1258 return SEN_QUEUE_FULL;
1259 default:
1260 case DEV_ONLINE:
1261 break;
1262 }
1263 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1264 device_p->dev_caller_count++;
1265 return 0;
1266}
1267
1268/**
1269 * Send puts the user's work on one of two queues:
1270 * the pending queue if the send was successful
1271 * the request queue if the send failed because device full or busy
1272 */
1273static inline int
1274z90crypt_send(struct work_element *we_p, const char *buf)
1275{
1276 int rv;
1277
1278 PDEBUG("PID %d\n", PID());
1279
1280 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1281 PDEBUG("PID %d tried to send more work but has outstanding "
1282 "work.\n", PID());
1283 return -EWORKPEND;
1284 }
1285 we_p->devindex = -1; // Reset device number
1286 spin_lock_irq(&queuespinlock);
1287 rv = send_to_crypto_device(we_p);
1288 switch (rv) {
1289 case 0:
1290 we_p->requestsent = jiffies;
1291 we_p->audit[0] |= FP_SENT;
1292 list_add_tail(&we_p->liste, &pending_list);
1293 ++pendingq_count;
1294 we_p->audit[0] |= FP_PENDING;
1295 break;
1296 case SEN_BUSY:
1297 case SEN_QUEUE_FULL:
1298 rv = 0;
1299 we_p->devindex = -1; // any device will do
1300 we_p->requestsent = jiffies;
1301 list_add_tail(&we_p->liste, &request_list);
1302 ++requestq_count;
1303 we_p->audit[0] |= FP_REQUEST;
1304 break;
1305 case SEN_RETRY:
1306 rv = -ERESTARTSYS;
1307 break;
1308 case SEN_NOT_AVAIL:
1309 PRINTK("*** No devices available.\n");
1310 rv = we_p->retcode = -ENODEV;
1311 we_p->status[0] |= STAT_FAILED;
1312 break;
1313 case REC_OPERAND_INV:
1314 case REC_OPERAND_SIZE:
1315 case REC_EVEN_MOD:
1316 case REC_INVALID_PAD:
1317 rv = we_p->retcode = -EINVAL;
1318 we_p->status[0] |= STAT_FAILED;
1319 break;
1320 default:
1321 we_p->retcode = rv;
1322 we_p->status[0] |= STAT_FAILED;
1323 break;
1324 }
1325 if (rv != -ERESTARTSYS)
1326 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1327 spin_unlock_irq(&queuespinlock);
1328 if (rv == 0)
1329 tasklet_schedule(&reader_tasklet);
1330 return rv;
1331}
1332
1333/**
1334 * process_results copies the user's work from kernel space.
1335 */
1336static inline int
1337z90crypt_process_results(struct work_element *we_p, char __user *buf)
1338{
1339 int rv;
1340
1341 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1342
1343 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1344 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1345
1346 rv = 0;
1347 if (!we_p->buffer) {
1348 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1349 we_p, PID());
1350 rv = -ENOBUFF;
1351 }
1352
1353 if (!rv)
1354 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1355 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1356 rv = -EFAULT;
1357 }
1358
1359 if (!rv)
1360 rv = we_p->retcode;
1361 if (!rv)
1362 if (we_p->resp_buff_size
1363 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1364 we_p->resp_buff_size))
1365 rv = -EFAULT;
1366
1367 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1368 return rv;
1369}
1370
1371static unsigned char NULL_psmid[8] =
1372{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1373
1374/**
1375 * Used in device configuration functions
1376 */
1377#define MAX_RESET 90
1378
1379/**
1380 * This is used only for PCICC support
1381 */
1382static inline int
1383is_PKCS11_padded(unsigned char *buffer, int length)
1384{
1385 int i;
1386 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1387 return 0;
1388 for (i = 2; i < length; i++)
1389 if (buffer[i] != 0xFF)
1390 break;
1391 if ((i < 10) || (i == length))
1392 return 0;
1393 if (buffer[i] != 0x00)
1394 return 0;
1395 return 1;
1396}
1397
1398/**
1399 * This is used only for PCICC support
1400 */
1401static inline int
1402is_PKCS12_padded(unsigned char *buffer, int length)
1403{
1404 int i;
1405 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1406 return 0;
1407 for (i = 2; i < length; i++)
1408 if (buffer[i] == 0x00)
1409 break;
1410 if ((i < 10) || (i == length))
1411 return 0;
1412 if (buffer[i] != 0x00)
1413 return 0;
1414 return 1;
1415}
1416
1417/**
1418 * builds struct caller and converts message from generic format to
1419 * device-dependent format
1420 * func is ICARSAMODEXPO or ICARSACRT
1421 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1422 */
1423static inline int
1424build_caller(struct work_element *we_p, short function)
1425{
1426 int rv;
1427 struct caller *caller_p = (struct caller *)we_p->requestptr;
1428
1429 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1430 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1431 (we_p->devtype != CEX2C))
1432 return SEN_NOT_AVAIL;
1433
1434 memcpy(caller_p->caller_id, we_p->caller_id,
1435 sizeof(caller_p->caller_id));
1436 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1437 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1438 caller_p->caller_buf_p = we_p->buffer;
1439 INIT_LIST_HEAD(&(caller_p->caller_liste));
1440
1441 rv = convert_request(we_p->buffer, we_p->funccode, function,
1442 z90crypt.cdx, we_p->devtype,
1443 &caller_p->caller_dev_dep_req_l,
1444 caller_p->caller_dev_dep_req_p);
1445 if (rv) {
1446 if (rv == SEN_NOT_AVAIL)
1447 PDEBUG("request can't be processed on hdwr avail\n");
1448 else
1449 PRINTK("Error from convert_request: %d\n", rv);
1450 }
1451 else
1452 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
1453 return rv;
1454}
1455
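/**
 * Detach a caller from its device's caller list, if it is queued there,
 * and clear its caller id.
 */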
1456static inline void
1457unbuild_caller(struct device *device_p, struct caller *caller_p)
1458{
1459 if (!caller_p)
1460 return;
1461 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1462 if (!list_empty(&caller_p->caller_liste)) {
1463 list_del_init(&caller_p->caller_liste);
1464 device_p->dev_caller_count--;
1465 }
1466 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1467}
1468
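/**
 * Validate the user's request, pick a device type that can handle the
 * modulus length, copy the input data from user space and build the
 * device-dependent request via build_caller.
 */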
1469static inline int
1470get_crypto_request_buffer(struct work_element *we_p)
1471{
1472 struct ica_rsa_modexpo *mex_p;
1473 struct ica_rsa_modexpo_crt *crt_p;
1474 unsigned char *temp_buffer;
1475 short function;
1476 int rv;
1477
1478 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1479 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1480
1481 PDEBUG("device type input = %d\n", we_p->devtype);
1482
1483 if (z90crypt.terminating)
1484 return REC_NO_RESPONSE;
1485 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1486 PRINTK("psmid zeroes\n");
1487 return SEN_FATAL_ERROR;
1488 }
1489 if (!we_p->buffer) {
1490 PRINTK("buffer pointer NULL\n");
1491 return SEN_USER_ERROR;
1492 }
1493 if (!we_p->requestptr) {
1494 PRINTK("caller pointer NULL\n");
1495 return SEN_USER_ERROR;
1496 }
1497
1498 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1499 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1500 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
1501 PRINTK("invalid device type\n");
1502 return SEN_USER_ERROR;
1503 }
1504
1505 if ((mex_p->inputdatalength < 1) ||
1506 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1507 PRINTK("inputdatalength[%d] is not valid\n",
1508 mex_p->inputdatalength);
1509 return SEN_USER_ERROR;
1510 }
1511
1512 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1513 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1514 mex_p->outputdatalength, mex_p->inputdatalength);
1515 return SEN_USER_ERROR;
1516 }
1517
1518 if (!mex_p->inputdata || !mex_p->outputdata) {
1519 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1520 mex_p->outputdata, mex_p->inputdata);
1521 return SEN_USER_ERROR;
1522 }
1523
1524 /**
1525 * As long as outputdatalength is big enough, we can set the
1526 * outputdatalength equal to the inputdatalength, since that is the
1527 * number of bytes we will copy in any case
1528 */
1529 mex_p->outputdatalength = mex_p->inputdatalength;
1530
1531 rv = 0;
1532 switch (we_p->funccode) {
1533 case ICARSAMODEXPO:
1534 if (!mex_p->b_key || !mex_p->n_modulus)
1535 rv = SEN_USER_ERROR;
1536 break;
1537 case ICARSACRT:
1538 if (!IS_EVEN(crt_p->inputdatalength)) {
1539 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1540 crt_p->inputdatalength);
1541 rv = SEN_USER_ERROR;
1542 break;
1543 }
1544 if (!crt_p->bp_key ||
1545 !crt_p->bq_key ||
1546 !crt_p->np_prime ||
1547 !crt_p->nq_prime ||
1548 !crt_p->u_mult_inv) {
1549 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1550 crt_p->bp_key, crt_p->bq_key,
1551 crt_p->np_prime, crt_p->nq_prime,
1552 crt_p->u_mult_inv);
1553 rv = SEN_USER_ERROR;
1554 }
1555 break;
1556 default:
1557 PRINTK("bad func = %d\n", we_p->funccode);
1558 rv = SEN_USER_ERROR;
1559 break;
1560 }
1561 if (rv != 0)
1562 return rv;
1563
1564 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1565 return SEN_NOT_AVAIL;
1566
1567 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1568 sizeof(struct caller);
1569 if (copy_from_user(temp_buffer, mex_p->inputdata,
1570 mex_p->inputdatalength) != 0)
1571 return SEN_RELEASED;
1572
1573 function = PCI_FUNC_KEY_ENCRYPT;
1574 switch (we_p->devtype) {
1575 /* PCICA does everything with a simple RSA mod-expo operation */
1576 case PCICA:
1577 function = PCI_FUNC_KEY_ENCRYPT;
1578 break;
1579 /**
1580 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
1581 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1582 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1583 * mod-expo operation
1584 */
1585 case PCIXCC_MCL2:
1586 if (we_p->funccode == ICARSAMODEXPO)
1587 function = PCI_FUNC_KEY_ENCRYPT;
1588 else
1589 function = PCI_FUNC_KEY_DECRYPT;
1590 break;
1591 case PCIXCC_MCL3:
1592 case CEX2C:
1593 if (we_p->funccode == ICARSAMODEXPO)
1594 function = PCI_FUNC_KEY_ENCRYPT;
1595 else
1596 function = PCI_FUNC_KEY_DECRYPT;
1597 break;
1598 /**
1599 * PCICC does everything as a PKCS-1.2 format request
1600 */
1601 case PCICC:
1602 /* PCICC cannot handle input that is PKCS#1.1 padded */
1603 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1604 return SEN_NOT_AVAIL;
1605 }
1606 if (we_p->funccode == ICARSAMODEXPO) {
1607 if (is_PKCS12_padded(temp_buffer,
1608 mex_p->inputdatalength))
1609 function = PCI_FUNC_KEY_ENCRYPT;
1610 else
1611 function = PCI_FUNC_KEY_DECRYPT;
1612 } else
1613 /* all CRT forms are decrypts */
1614 function = PCI_FUNC_KEY_DECRYPT;
1615 break;
1616 }
1617 PDEBUG("function: %04x\n", function);
1618 rv = build_caller(we_p, function);
1619 PDEBUG("rv from build_caller = %d\n", rv);
1620 return rv;
1621}
1622
1623static inline int
1624z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1625 const char __user *buffer)
1626{
1627 int rv;
1628
1629 we_p->devindex = -1;
1630 if (funccode == ICARSAMODEXPO)
1631 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1632 else
1633 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1634
1635 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1636 return -EFAULT;
1637
1638 we_p->audit[0] |= FP_COPYFROM;
1639 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1640 we_p->funccode = funccode;
1641 we_p->devtype = -1;
1642 we_p->audit[0] |= FP_BUFFREQ;
1643 rv = get_crypto_request_buffer(we_p);
1644 switch (rv) {
1645 case 0:
1646 we_p->audit[0] |= FP_BUFFGOT;
1647 break;
1648 case SEN_USER_ERROR:
1649 rv = -EINVAL;
1650 break;
1651 case SEN_QUEUE_FULL:
1652 rv = 0;
1653 break;
1654 case SEN_RELEASED:
1655 rv = -EFAULT;
1656 break;
1657 case REC_NO_RESPONSE:
1658 rv = -ENODEV;
1659 break;
1660 case SEN_NOT_AVAIL:
1661 case EGETBUFF:
1662 rv = -EGETBUFF;
1663 break;
1664 default:
1665 PRINTK("rv = %d\n", rv);
1666 rv = -EGETBUFF;
1667 break;
1668 }
1669 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1670 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1671 return rv;
1672}
1673
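/**
 * Remove a work element from the request or pending queue, whichever it
 * is on, and adjust the corresponding counter.
 */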
1674static inline void
1675purge_work_element(struct work_element *we_p)
1676{
1677 struct list_head *lptr;
1678
1679 spin_lock_irq(&queuespinlock);
1680 list_for_each(lptr, &request_list) {
1681 if (lptr == &we_p->liste) {
1682 list_del_init(lptr);
1683 requestq_count--;
1684 break;
1685 }
1686 }
1687 list_for_each(lptr, &pending_list) {
1688 if (lptr == &we_p->liste) {
1689 list_del_init(lptr);
1690 pendingq_count--;
1691 break;
1692 }
1693 }
1694 spin_unlock_irq(&queuespinlock);
1695}
1696
1697/**
1698 * Build the request and send it.
1699 */
1700static inline int
1701z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1702 unsigned int cmd, unsigned long arg)
1703{
1704 struct work_element *we_p;
1705 int rv;
1706
1707 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1708 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1709 return rv;
1710 }
1711 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1712 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1713 if (!rv)
1714 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1715 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1716 if (!rv) {
1717 we_p->audit[0] |= FP_ASLEEP;
1718 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1719 we_p->audit[0] |= FP_AWAKE;
1720 rv = we_p->retcode;
1721 }
1722 if (!rv)
1723 rv = z90crypt_process_results(we_p, (char __user *)arg);
1724
1725 if ((we_p->status[0] & STAT_FAILED)) {
1726 switch (rv) {
1727 /**
1728 * EINVAL *after* receive is almost always a padding error or
1729 * length error issued by a coprocessor (not an accelerator).
1730 * We convert this return value to -EGETBUFF which should
1731 * trigger a fallback to software.
1732 */
1733 case -EINVAL:
1734 if (we_p->devtype != PCICA)
1735 rv = -EGETBUFF;
1736 break;
1737 case -ETIMEOUT:
1738 if (z90crypt.mask.st_count > 0)
1739 rv = -ERESTARTSYS; // retry with another
1740 else
1741 rv = -ENODEV; // no cards left
1742 /* fall through to clean up request queue */
1743 case -ERESTARTSYS:
1744 case -ERELEASED:
1745 switch (CHK_RDWRMASK(we_p->status[0])) {
1746 case STAT_WRITTEN:
1747 purge_work_element(we_p);
1748 break;
1749 case STAT_READPEND:
1750 case STAT_NOWORK:
1751 default:
1752 break;
1753 }
1754 break;
1755 default:
1756 we_p->status[0] ^= STAT_FAILED;
1757 break;
1758 }
1759 }
1760 free_page((long)we_p);
1761 return rv;
1762}
1763
1764/**
1765 * This function is a little long, but it's really just one large switch
1766 * statement.
1767 */
1768static long
1769z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1770{
1771 struct priv_data *private_data_p = filp->private_data;
1772 unsigned char *status;
1773 unsigned char *qdepth;
1774 unsigned int *reqcnt;
1775 struct ica_z90_status *pstat;
1776 int ret, i, loopLim, tempstat;
1777 static int deprecated_msg_count1 = 0;
1778 static int deprecated_msg_count2 = 0;
1779
1780 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1781 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1782 cmd,
1783 !_IOC_DIR(cmd) ? "NO"
1784 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1785 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1786 : "WR")),
1787 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1788
1789 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1790 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1791 return -ENOTTY;
1792 }
1793
1794 ret = 0;
1795 switch (cmd) {
1796 case ICARSAMODEXPO:
1797 case ICARSACRT:
1798 if (quiesce_z90crypt) {
1799 ret = -EQUIESCE;
1800 break;
1801 }
1802 ret = -ENODEV; // Default if no devices
1803 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1804 (z90crypt.hdware_info->hdware_mask.disabled_count +
1805 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1806 for (i = 0; i < loopLim; i++) {
1807 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1808 if (ret != -ERESTARTSYS)
1809 break;
1810 }
1811 if (ret == -ERESTARTSYS)
1812 ret = -ENODEV;
1813 break;
1814
1815 case Z90STAT_TOTALCOUNT:
1816 tempstat = get_status_totalcount();
1817 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1818 ret = -EFAULT;
1819 break;
1820
1821 case Z90STAT_PCICACOUNT:
1822 tempstat = get_status_PCICAcount();
1823 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1824 ret = -EFAULT;
1825 break;
1826
1827 case Z90STAT_PCICCCOUNT:
1828 tempstat = get_status_PCICCcount();
1829 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1830 ret = -EFAULT;
1831 break;
1832
1833 case Z90STAT_PCIXCCMCL2COUNT:
1834 tempstat = get_status_PCIXCCMCL2count();
1835 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1836 ret = -EFAULT;
1837 break;
1838
1839 case Z90STAT_PCIXCCMCL3COUNT:
1840 tempstat = get_status_PCIXCCMCL3count();
1841 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1842 ret = -EFAULT;
1843 break;
1844
1845 case Z90STAT_CEX2CCOUNT:
1846 tempstat = get_status_CEX2Ccount();
1847 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1848 ret = -EFAULT;
1849 break;
1850
1851 case Z90STAT_REQUESTQ_COUNT:
1852 tempstat = get_status_requestq_count();
1853 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1854 ret = -EFAULT;
1855 break;
1856
1857 case Z90STAT_PENDINGQ_COUNT:
1858 tempstat = get_status_pendingq_count();
1859 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1860 ret = -EFAULT;
1861 break;
1862
1863 case Z90STAT_TOTALOPEN_COUNT:
1864 tempstat = get_status_totalopen_count();
1865 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1866 ret = -EFAULT;
1867 break;
1868
1869 case Z90STAT_DOMAIN_INDEX:
1870 tempstat = get_status_domain_index();
1871 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1872 ret = -EFAULT;
1873 break;
1874
1875 case Z90STAT_STATUS_MASK:
1876 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1877 if (!status) {
1878 PRINTK("kmalloc for status failed!\n");
1879 ret = -ENOMEM;
1880 break;
1881 }
1882 get_status_status_mask(status);
1883 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1884 != 0)
1885 ret = -EFAULT;
1886 kfree(status);
1887 break;
1888
1889 case Z90STAT_QDEPTH_MASK:
1890 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1891 if (!qdepth) {
1892 PRINTK("kmalloc for qdepth failed!\n");
1893 ret = -ENOMEM;
1894 break;
1895 }
1896 get_status_qdepth_mask(qdepth);
1897 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1898 ret = -EFAULT;
1899 kfree(qdepth);
1900 break;
1901
1902 case Z90STAT_PERDEV_REQCNT:
1903 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1904 if (!reqcnt) {
1905 PRINTK("kmalloc for reqcnt failed!\n");
1906 ret = -ENOMEM;
1907 break;
1908 }
1909 get_status_perdevice_reqcnt(reqcnt);
1910 if (copy_to_user((char __user *) arg, reqcnt,
1911 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1912 ret = -EFAULT;
1913 kfree(reqcnt);
1914 break;
1915
1916 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1917 case ICAZ90STATUS:
1918 if (deprecated_msg_count1 < 20) {
1919 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1920 deprecated_msg_count1++;
1921 if (deprecated_msg_count1 == 20)
1922 PRINTK("No longer issuing messages related to "
1923 "deprecated call to ICAZ90STATUS.\n");
1924 }
1925
1926 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1927 if (!pstat) {
1928 PRINTK("kmalloc for pstat failed!\n");
1929 ret = -ENOMEM;
1930 break;
1931 }
1932
1933 pstat->totalcount = get_status_totalcount();
1934 pstat->leedslitecount = get_status_PCICAcount();
1935 pstat->leeds2count = get_status_PCICCcount();
1936 pstat->requestqWaitCount = get_status_requestq_count();
1937 pstat->pendingqWaitCount = get_status_pendingq_count();
1938 pstat->totalOpenCount = get_status_totalopen_count();
1939 pstat->cryptoDomain = get_status_domain_index();
1940 get_status_status_mask(pstat->status);
1941 get_status_qdepth_mask(pstat->qdepth);
1942
1943 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1944 sizeof(struct ica_z90_status)) != 0)
1945 ret = -EFAULT;
1946 kfree(pstat);
1947 break;
1948
1949 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1950 case Z90STAT_PCIXCCCOUNT:
1951 if (deprecated_msg_count2 < 20) {
1952 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1953 deprecated_msg_count2++;
1954 if (deprecated_msg_count2 == 20)
1955 PRINTK("No longer issuing messages about depre"
1956 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1957 }
1958
1959 tempstat = get_status_PCIXCCcount();
1960		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1961 ret = -EFAULT;
1962 break;
1963
1964 case Z90QUIESCE:
1965 if (current->euid != 0) {
1966 PRINTK("QUIESCE fails: euid %d\n",
1967 current->euid);
1968 ret = -EACCES;
1969 } else {
1970 PRINTK("QUIESCE device from PID %d\n", PID());
1971 quiesce_z90crypt = 1;
1972 }
1973 break;
1974
1975 default:
1976 /* user passed an invalid IOCTL number */
1977 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1978 ret = -ENOTTY;
1979 break;
1980 }
1981
1982 return ret;
1983}
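/**
 * Illustrative only -- a minimal user-space sketch of the status ioctls
 * handled above, assuming the conventional misc-device node name; it is
 * not part of the driver itself.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include "z90crypt.h"
 *
 *	int main(void)
 *	{
 *		int fd, total = 0;
 *
 *		fd = open("/dev/z90crypt", O_RDWR);	// node name assumed
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, Z90STAT_TOTALCOUNT, &total) == 0)
 *			printf("%d crypto devices online\n", total);
 *		close(fd);
 *		return 0;
 *	}
 */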
1984
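/**
 * Hex-dump helpers for the procfs status output below: sprintcl renders
 * a run of bytes, sprintrw one row of up to 64 bytes in 16-byte groups,
 * sprinthx a titled block of such rows, and sprinthx4 a titled block of
 * 32-bit words, eight per line.  Each returns the number of characters
 * written into outaddr.
 */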
1985static inline int
1986sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1987{
1988 int hl, i;
1989
1990 hl = 0;
1991 for (i = 0; i < len; i++)
1992 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1993 hl += sprintf(outaddr+hl, " ");
1994
1995 return hl;
1996}
1997
1998static inline int
1999sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
2000{
2001 int hl, inl, c, cx;
2002
2003 hl = sprintf(outaddr, " ");
2004 inl = 0;
2005 for (c = 0; c < (len / 16); c++) {
2006 hl += sprintcl(outaddr+hl, addr+inl, 16);
2007 inl += 16;
2008 }
2009
2010 cx = len%16;
2011 if (cx) {
2012 hl += sprintcl(outaddr+hl, addr+inl, cx);
2013 inl += cx;
2014 }
2015
2016 hl += sprintf(outaddr+hl, "\n");
2017
2018 return hl;
2019}
2020
2021static inline int
2022sprinthx(unsigned char *title, unsigned char *outaddr,
2023 unsigned char *addr, unsigned int len)
2024{
2025 int hl, inl, r, rx;
2026
2027 hl = sprintf(outaddr, "\n%s\n", title);
2028 inl = 0;
2029 for (r = 0; r < (len / 64); r++) {
2030 hl += sprintrw(outaddr+hl, addr+inl, 64);
2031 inl += 64;
2032 }
2033 rx = len % 64;
2034 if (rx) {
2035 hl += sprintrw(outaddr+hl, addr+inl, rx);
2036 inl += rx;
2037 }
2038
2039 hl += sprintf(outaddr+hl, "\n");
2040
2041 return hl;
2042}
2043
2044static inline int
2045sprinthx4(unsigned char *title, unsigned char *outaddr,
2046 unsigned int *array, unsigned int len)
2047{
2048 int hl, r;
2049
2050 hl = sprintf(outaddr, "\n%s\n", title);
2051
2052 for (r = 0; r < len; r++) {
2053 if ((r % 8) == 0)
2054 hl += sprintf(outaddr+hl, " ");
2055 hl += sprintf(outaddr+hl, "%08X ", array[r]);
2056 if ((r % 8) == 7)
2057 hl += sprintf(outaddr+hl, "\n");
2058 }
2059
2060 hl += sprintf(outaddr+hl, "\n");
2061
2062 return hl;
2063}
2064
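/**
 * procfs read callback: renders the driver version, crypto domain,
 * per-type device counts, queue and open-handle counts, and then the
 * online, queue-depth and per-device request-count masks via the sprint
 * helpers above.  The second half of the response page is used as
 * scratch space for the mask snapshots.
 */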
2065static int
2066z90crypt_status(char *resp_buff, char **start, off_t offset,
2067 int count, int *eof, void *data)
2068{
2069 unsigned char *workarea;
2070 int len;
2071
2072 /* resp_buff is a page. Use the right half for a work area */
2073 workarea = resp_buff+2000;
2074 len = 0;
2075 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2076 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2077 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2078 get_status_domain_index());
2079 len += sprintf(resp_buff+len, "Total device count: %d\n",
2080 get_status_totalcount());
2081 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2082 get_status_PCICAcount());
2083 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2084 get_status_PCICCcount());
2085 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2086 get_status_PCIXCCMCL2count());
2087 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2088 get_status_PCIXCCMCL3count());
2089 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2090 get_status_CEX2Ccount());
2091 len += sprintf(resp_buff+len, "requestq count: %d\n",
2092 get_status_requestq_count());
2093 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2094 get_status_pendingq_count());
2095 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2096 get_status_totalopen_count());
2097 len += sprinthx(
2098 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
2099 "4: PCIXCC (MCL3), 5: CEX2C",
2100 resp_buff+len,
2101 get_status_status_mask(workarea),
2102 Z90CRYPT_NUM_APS);
2103 len += sprinthx("Waiting work element counts",
2104 resp_buff+len,
2105 get_status_qdepth_mask(workarea),
2106 Z90CRYPT_NUM_APS);
2107 len += sprinthx4(
2108 "Per-device successfully completed request counts",
2109 resp_buff+len,
2110 get_status_perdevice_reqcnt((unsigned int *)workarea),
2111 Z90CRYPT_NUM_APS);
2112 *eof = 1;
2113 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2114 return len;
2115}
2116
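/**
 * disable_card/enable_card set or clear a card's user_disabled flag and
 * keep the global and per-type user_disabled counters in step.  They are
 * driven by the procfs write handler below.
 */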
2117static inline void
2118disable_card(int card_index)
2119{
2120 struct device *devp;
2121
2122 devp = LONG2DEVPTR(card_index);
2123 if (!devp || devp->user_disabled)
2124 return;
2125 devp->user_disabled = 1;
2126 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2127 if (devp->dev_type == -1)
2128 return;
2129 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2130}
2131
2132static inline void
2133enable_card(int card_index)
2134{
2135 struct device *devp;
2136
2137 devp = LONG2DEVPTR(card_index);
2138 if (!devp || !devp->user_disabled)
2139 return;
2140 devp->user_disabled = 0;
2141 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2142 if (devp->dev_type == -1)
2143 return;
2144 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2145}
2146
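/**
 * Parsing helpers for the procfs write handler: scan_char looks for a
 * single character within len bytes, flagging end of input at '\0' and
 * stopping at '\n'; scan_string builds on it to locate a whole
 * substring.  Both report how far the scan advanced through *offs.
 */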
2147static inline int
2148scan_char(unsigned char *bf, unsigned int len,
2149 unsigned int *offs, unsigned int *p_eof, unsigned char c)
2150{
2151 unsigned int i, found;
2152
2153 found = 0;
2154 for (i = 0; i < len; i++) {
2155 if (bf[i] == c) {
2156 found = 1;
2157 break;
2158 }
2159 if (bf[i] == '\0') {
2160 *p_eof = 1;
2161 break;
2162 }
2163 if (bf[i] == '\n') {
2164 break;
2165 }
2166 }
2167 *offs = i+1;
2168 return found;
2169}
2170
2171static inline int
2172scan_string(unsigned char *bf, unsigned int len,
2173 unsigned int *offs, unsigned int *p_eof, unsigned char *s)
2174{
2175 unsigned int temp_len, temp_offs, found, eof;
2176
2177 temp_len = temp_offs = found = eof = 0;
2178 while (!eof && !found) {
2179 found = scan_char(bf+temp_len, len-temp_len,
2180 &temp_offs, &eof, *s);
2181
2182 temp_len += temp_offs;
2183 if (eof) {
2184 found = 0;
2185 break;
2186 }
2187
2188 if (found) {
2189 if (len >= temp_offs+strlen(s)) {
2190 found = !strncmp(bf+temp_len-1, s, strlen(s));
2191 if (found) {
2192 *offs = temp_len+strlen(s)-1;
2193 break;
2194 }
2195 } else {
2196 found = 0;
2197 *p_eof = 1;
2198 break;
2199 }
2200 }
2201 }
2202 return found;
2203}
2204
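/**
 * procfs write callback: the written text is searched for the
 * "Online devices" line from the status output; on the following line
 * 'd'/'D' disables and 'e'/'E' enables the card at that position, the
 * type digits 0-5 skip it, blanks and tabs are ignored, and anything
 * else ends the scan.
 */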
2205static int
2206z90crypt_status_write(struct file *file, const char __user *buffer,
2207 unsigned long count, void *data)
2208{
2209 int i, j, len, offs, found, eof;
2210 unsigned char *lbuf;
2211 unsigned int local_count;
2212
2213#define LBUFSIZE 600
2214	if (count <= 0)
2215		return 0;
2216
2217	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2218	if (!lbuf) {
2219		PRINTK("kmalloc failed!\n");
2220		return 0;
2221	}
2222
2223 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2224
2225 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2226 kfree(lbuf);
2227 return -EFAULT;
2228 }
2229
2230 lbuf[local_count-1] = '\0';
2231
2232 len = 0;
2233 eof = 0;
2234 found = 0;
2235 while (!eof) {
2236 found = scan_string(lbuf+len, local_count-len, &offs, &eof,
2237 "Online devices");
2238 len += offs;
2239 if (found == 1)
2240 break;
2241 }
2242
2243 if (eof) {
2244 kfree(lbuf);
2245 return count;
2246 }
2247
2248 if (found)
2249 found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n');
2250
2251 if (!found || eof) {
2252 kfree(lbuf);
2253 return count;
2254 }
2255
2256 len += offs;
2257 j = 0;
2258 for (i = 0; i < 80; i++) {
2259 switch (*(lbuf+len+i)) {
2260 case '\t':
2261 case ' ':
2262 break;
2263 case '\n':
2264 default:
2265 eof = 1;
2266 break;
2267 case '0':
2268 case '1':
2269 case '2':
2270 case '3':
2271 case '4':
2272 case '5':
2273 j++;
2274 break;
2275 case 'd':
2276 case 'D':
2277 disable_card(j);
2278 j++;
2279 break;
2280 case 'e':
2281 case 'E':
2282 enable_card(j);
2283 j++;
2284 break;
2285 }
2286 if (eof)
2287 break;
2288 }
2289
2290 kfree(lbuf);
2291 return count;
2292}
2293
2294/**
2295 * Functions that run under a timer, with no process id
2296 *
2297 * The task functions:
2298 * z90crypt_reader_task
2299 * helper_send_work
2300 * helper_handle_work_element
2301 * helper_receive_rc
2302 * z90crypt_config_task
2303 * z90crypt_cleanup_task
2304 *
2305 * Helper functions:
2306 * z90crypt_schedule_reader_timer
2307 * z90crypt_schedule_reader_task
2308 * z90crypt_schedule_config_task
2309 * z90crypt_schedule_cleanup_task
2310 */
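/**
 * Dequeue one reply from the device at `index'.  The PSMID returned by
 * the hardware is matched against the device's caller list; on a match
 * the response is converted into the caller's buffer and the user-space
 * output address is passed back, while a hardware failure causes the
 * device to be removed from the configuration.
 */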
2311static inline int
2312receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2313 unsigned char *buff, unsigned char __user **dest_p_p)
2314{
2315 int dv, rv;
2316 struct device *dev_ptr;
2317 struct caller *caller_p;
2318 struct ica_rsa_modexpo *icaMsg_p;
2319 struct list_head *ptr, *tptr;
2320
2321 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2322
2323 if (z90crypt.terminating)
2324 return REC_FATAL_ERROR;
2325
2326 caller_p = 0;
2327 dev_ptr = z90crypt.device_p[index];
2328 rv = 0;
2329 do {
2330 if (!dev_ptr || dev_ptr->disabled) {
2331 rv = REC_NO_WORK; // a disabled device can't return work
2332 break;
2333 }
2334 if (dev_ptr->dev_self_x != index) {
2335 PRINTKC("Corrupt dev ptr\n");
2336 z90crypt.terminating = 1;
2337 rv = REC_FATAL_ERROR;
2338 break;
2339 }
2340 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2341 dv = DEV_REC_EXCEPTION;
2342 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2343 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2344 } else {
2345 PDEBUG("Dequeue called for device %d\n", index);
2346 dv = receive_from_AP(index, z90crypt.cdx,
2347 dev_ptr->dev_resp_l,
2348 dev_ptr->dev_resp_p, psmid);
2349 }
2350 switch (dv) {
2351 case DEV_REC_EXCEPTION:
2352 rv = REC_FATAL_ERROR;
2353 z90crypt.terminating = 1;
2354 PRINTKC("Exception in receive from device %d\n",
2355 index);
2356 break;
2357 case DEV_ONLINE:
2358 rv = 0;
2359 break;
2360 case DEV_EMPTY:
2361 rv = REC_EMPTY;
2362 break;
2363 case DEV_NO_WORK:
2364 rv = REC_NO_WORK;
2365 break;
2366 case DEV_BAD_MESSAGE:
2367 case DEV_GONE:
2368 case REC_HARDWAR_ERR:
2369 default:
2370 rv = REC_NO_RESPONSE;
2371 break;
2372 }
2373 if (rv)
2374 break;
2375 if (dev_ptr->dev_caller_count <= 0) {
2376 rv = REC_USER_GONE;
2377 break;
2378 }
2379
2380 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2381 caller_p = list_entry(ptr, struct caller, caller_liste);
2382 if (!memcmp(caller_p->caller_id, psmid,
2383 sizeof(caller_p->caller_id))) {
2384 if (!list_empty(&caller_p->caller_liste)) {
2385 list_del_init(ptr);
2386 dev_ptr->dev_caller_count--;
2387 break;
2388 }
2389 }
2390 caller_p = 0;
2391 }
2392 if (!caller_p) {
2393 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2394 "%02X%02X%02X in device list\n",
2395 psmid[0], psmid[1], psmid[2], psmid[3],
2396 psmid[4], psmid[5], psmid[6], psmid[7]);
2397 rv = REC_USER_GONE;
2398 break;
2399 }
2400
2401 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2402 rv = convert_response(dev_ptr->dev_resp_p,
2403 caller_p->caller_buf_p, buff_len_p, buff);
2404 switch (rv) {
2405 case REC_USE_PCICA:
2406 break;
2407 case REC_OPERAND_INV:
2408 case REC_OPERAND_SIZE:
2409 case REC_EVEN_MOD:
2410 case REC_INVALID_PAD:
2411 PDEBUG("device %d: 'user error' %d\n", index, rv);
2412 break;
2413 case WRONG_DEVICE_TYPE:
2414 case REC_HARDWAR_ERR:
2415 case REC_BAD_MESSAGE:
2416 PRINTKW("device %d: hardware error %d\n", index, rv);
2417 rv = REC_NO_RESPONSE;
2418 break;
2419 default:
2420 PDEBUG("device %d: rv = %d\n", index, rv);
2421 break;
2422 }
2423 } while (0);
2424
2425 switch (rv) {
2426 case 0:
2427 PDEBUG("Successful receive from device %d\n", index);
2428 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2429 *dest_p_p = icaMsg_p->outputdata;
2430 if (*buff_len_p == 0)
2431 PRINTK("Zero *buff_len_p\n");
2432 break;
2433 case REC_NO_RESPONSE:
2434 PRINTKW("Removing device %d from availability\n", index);
2435 remove_device(dev_ptr);
2436 break;
2437 }
2438
2439 if (caller_p)
2440 unbuild_caller(dev_ptr, caller_p);
2441
2442 return rv;
2443}
2444
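/**
 * Take the oldest request off request_list and, if its type matches the
 * device at (short) index, send it to that device; on success the work
 * element moves to pending_list, otherwise its return code is set and
 * the waiting caller is woken.
 */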
2445static inline void
2446helper_send_work(int index)
2447{
2448 struct work_element *rq_p;
2449 int rv;
2450
2451 if (list_empty(&request_list))
2452 return;
2453 requestq_count--;
2454 rq_p = list_entry(request_list.next, struct work_element, liste);
2455 list_del_init(&rq_p->liste);
2456 rq_p->audit[1] |= FP_REMREQUEST;
2457 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2458 rq_p->devindex = SHRT2LONG(index);
2459 rv = send_to_crypto_device(rq_p);
2460 if (rv == 0) {
2461 rq_p->requestsent = jiffies;
2462 rq_p->audit[0] |= FP_SENT;
2463 list_add_tail(&rq_p->liste, &pending_list);
2464 ++pendingq_count;
2465 rq_p->audit[0] |= FP_PENDING;
2466 } else {
2467 switch (rv) {
2468 case REC_OPERAND_INV:
2469 case REC_OPERAND_SIZE:
2470 case REC_EVEN_MOD:
2471 case REC_INVALID_PAD:
2472 rq_p->retcode = -EINVAL;
2473 break;
2474 case SEN_NOT_AVAIL:
2475 case SEN_RETRY:
2476 case REC_NO_RESPONSE:
2477 default:
2478 if (z90crypt.mask.st_count > 1)
2479 rq_p->retcode =
2480 -ERESTARTSYS;
2481 else
2482 rq_p->retcode = -ENODEV;
2483 break;
2484 }
2485 rq_p->status[0] |= STAT_FAILED;
2486 rq_p->audit[1] |= FP_AWAKENING;
2487 atomic_set(&rq_p->alarmrung, 1);
2488 wake_up(&rq_p->waitq);
2489 }
2490 } else {
2491 if (z90crypt.mask.st_count > 1)
2492 rq_p->retcode = -ERESTARTSYS;
2493 else
2494 rq_p->retcode = -ENODEV;
2495 rq_p->status[0] |= STAT_FAILED;
2496 rq_p->audit[1] |= FP_AWAKENING;
2497 atomic_set(&rq_p->alarmrung, 1);
2498 wake_up(&rq_p->waitq);
2499 }
2500}
2501
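/**
 * Find the pending work element whose caller id matches the PSMID just
 * dequeued, copy the response (or an error code) into it, and wake the
 * sleeping caller unless the request already failed with -ERELEASED.
 */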
2502static inline void
2503helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2504 int buff_len, unsigned char *buff,
2505 unsigned char __user *resp_addr)
2506{
2507 struct work_element *pq_p;
2508 struct list_head *lptr, *tptr;
2509
2510 pq_p = 0;
2511 list_for_each_safe(lptr, tptr, &pending_list) {
2512 pq_p = list_entry(lptr, struct work_element, liste);
2513 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2514 list_del_init(lptr);
2515 pendingq_count--;
2516 pq_p->audit[1] |= FP_NOTPENDING;
2517 break;
2518 }
2519 pq_p = 0;
2520 }
2521
2522 if (!pq_p) {
2523 PRINTK("device %d has work but no caller exists on pending Q\n",
2524 SHRT2LONG(index));
2525 return;
2526 }
2527
2528 switch (rc) {
2529 case 0:
2530 pq_p->resp_buff_size = buff_len;
2531 pq_p->audit[1] |= FP_RESPSIZESET;
2532 if (buff_len) {
2533 pq_p->resp_addr = resp_addr;
2534 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2535 memcpy(pq_p->resp_buff, buff, buff_len);
2536 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2537 }
2538 break;
2539 case REC_OPERAND_INV:
2540 case REC_OPERAND_SIZE:
2541 case REC_EVEN_MOD:
2542 case REC_INVALID_PAD:
2543 PDEBUG("-EINVAL after application error %d\n", rc);
2544 pq_p->retcode = -EINVAL;
2545 pq_p->status[0] |= STAT_FAILED;
2546 break;
2547 case REC_USE_PCICA:
2548 pq_p->retcode = -ERESTARTSYS;
2549 pq_p->status[0] |= STAT_FAILED;
2550 break;
2551 case REC_NO_RESPONSE:
2552 default:
2553 if (z90crypt.mask.st_count > 1)
2554 pq_p->retcode = -ERESTARTSYS;
2555 else
2556 pq_p->retcode = -ENODEV;
2557 pq_p->status[0] |= STAT_FAILED;
2558 break;
2559 }
2560 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2561 pq_p->audit[1] |= FP_AWAKENING;
2562 atomic_set(&pq_p->alarmrung, 1);
2563 wake_up(&pq_p->waitq);
2564 }
2565}
2566
2567/**
2568 * return TRUE if the work element should be removed from the queue
2569 */
2570static inline int
2571helper_receive_rc(int index, int *rc_p)
2572{
2573 switch (*rc_p) {
2574 case 0:
2575 case REC_OPERAND_INV:
2576 case REC_OPERAND_SIZE:
2577 case REC_EVEN_MOD:
2578 case REC_INVALID_PAD:
2579 case REC_USE_PCICA:
2580 break;
2581
2582 case REC_BUSY:
2583 case REC_NO_WORK:
2584 case REC_EMPTY:
2585 case REC_RETRY_DEV:
2586 case REC_FATAL_ERROR:
2587 return 0;
2588
2589 case REC_NO_RESPONSE:
2590 break;
2591
2592 default:
2593 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2594 *rc_p, SHRT2LONG(index));
2595 *rc_p = REC_NO_RESPONSE;
2596 break;
2597 }
2598 return 1;
2599}
2600
2601static inline void
2602z90crypt_schedule_reader_timer(void)
2603{
2604 if (timer_pending(&reader_timer))
2605 return;
2606 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2607 PRINTK("Timer pending while modifying reader timer\n");
2608}
2609
2610static void
2611z90crypt_reader_task(unsigned long ptr)
2612{
2613 int workavail, index, rc, buff_len;
2614 unsigned char psmid[8];
2615 unsigned char __user *resp_addr;
2616 static unsigned char buff[1024];
2617
2618 /**
2619 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2620 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2621 * loop, there is no work remaining on the queues.
2622 */
2623 resp_addr = 0;
2624 workavail = 2;
2625 buff_len = 0;
2626 while (workavail) {
2627 workavail--;
2628 rc = 0;
2629 spin_lock_irq(&queuespinlock);
2630 memset(buff, 0x00, sizeof(buff));
2631
2632		/* Dequeue once from each device in round-robin order. */
2633 for (index = 0; index < z90crypt.mask.st_count; index++) {
2634 PDEBUG("About to receive.\n");
2635 rc = receive_from_crypto_device(SHRT2LONG(index),
2636 psmid,
2637 &buff_len,
2638 buff,
2639 &resp_addr);
2640 PDEBUG("Dequeued: rc = %d.\n", rc);
2641
2642 if (helper_receive_rc(index, &rc)) {
2643 if (rc != REC_NO_RESPONSE) {
2644 helper_send_work(index);
2645 workavail = 2;
2646 }
2647
2648 helper_handle_work_element(index, psmid, rc,
2649 buff_len, buff,
2650 resp_addr);
2651 }
2652
2653 if (rc == REC_FATAL_ERROR)
2654 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2655 SHRT2LONG(index));
2656 }
2657 spin_unlock_irq(&queuespinlock);
2658 }
2659
2660 if (pendingq_count + requestq_count)
2661 z90crypt_schedule_reader_timer();
2662}
2663
2664static inline void
2665z90crypt_schedule_config_task(unsigned int expiration)
2666{
2667 if (timer_pending(&config_timer))
2668 return;
2669 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2670 PRINTK("Timer pending while modifying config timer\n");
2671}
2672
2673static void
2674z90crypt_config_task(unsigned long ptr)
2675{
2676 int rc;
2677
2678 PDEBUG("jiffies %ld\n", jiffies);
2679
2680 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2681 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2682 /* If return was fatal, don't bother reconfiguring */
2683 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2684 z90crypt_schedule_config_task(CONFIGTIME);
2685}
2686
2687static inline void
2688z90crypt_schedule_cleanup_task(void)
2689{
2690 if (timer_pending(&cleanup_timer))
2691 return;
2692 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2693 PRINTK("Timer pending while modifying cleanup timer\n");
2694}
2695
2696static inline void
2697helper_drain_queues(void)
2698{
2699 struct work_element *pq_p;
2700 struct list_head *lptr, *tptr;
2701
2702 list_for_each_safe(lptr, tptr, &pending_list) {
2703 pq_p = list_entry(lptr, struct work_element, liste);
2704 pq_p->retcode = -ENODEV;
2705 pq_p->status[0] |= STAT_FAILED;
2706 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2707 (struct caller *)pq_p->requestptr);
2708 list_del_init(lptr);
2709 pendingq_count--;
2710 pq_p->audit[1] |= FP_NOTPENDING;
2711 pq_p->audit[1] |= FP_AWAKENING;
2712 atomic_set(&pq_p->alarmrung, 1);
2713 wake_up(&pq_p->waitq);
2714 }
2715
2716 list_for_each_safe(lptr, tptr, &request_list) {
2717 pq_p = list_entry(lptr, struct work_element, liste);
2718 pq_p->retcode = -ENODEV;
2719 pq_p->status[0] |= STAT_FAILED;
2720 list_del_init(lptr);
2721 requestq_count--;
2722 pq_p->audit[1] |= FP_REMREQUEST;
2723 pq_p->audit[1] |= FP_AWAKENING;
2724 atomic_set(&pq_p->alarmrung, 1);
2725 wake_up(&pq_p->waitq);
2726 }
2727}
2728
2729static inline void
2730helper_timeout_requests(void)
2731{
2732 struct work_element *pq_p;
2733 struct list_head *lptr, *tptr;
2734 long timelimit;
2735
2736 timelimit = jiffies - (CLEANUPTIME * HZ);
2737 /* The list is in strict chronological order */
2738 list_for_each_safe(lptr, tptr, &pending_list) {
2739 pq_p = list_entry(lptr, struct work_element, liste);
2740 if (pq_p->requestsent >= timelimit)
2741 break;
2742 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2743 ((struct caller *)pq_p->requestptr)->caller_id[0],
2744 ((struct caller *)pq_p->requestptr)->caller_id[1],
2745 ((struct caller *)pq_p->requestptr)->caller_id[2],
2746 ((struct caller *)pq_p->requestptr)->caller_id[3],
2747 ((struct caller *)pq_p->requestptr)->caller_id[4],
2748 ((struct caller *)pq_p->requestptr)->caller_id[5],
2749 ((struct caller *)pq_p->requestptr)->caller_id[6],
2750 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2751 pq_p->retcode = -ETIMEOUT;
2752 pq_p->status[0] |= STAT_FAILED;
2753 /* get this off any caller queue it may be on */
2754 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2755 (struct caller *) pq_p->requestptr);
2756 list_del_init(lptr);
2757 pendingq_count--;
2758 pq_p->audit[1] |= FP_TIMEDOUT;
2759 pq_p->audit[1] |= FP_NOTPENDING;
2760 pq_p->audit[1] |= FP_AWAKENING;
2761 atomic_set(&pq_p->alarmrung, 1);
2762 wake_up(&pq_p->waitq);
2763 }
2764
2765 /**
2766 * If pending count is zero, items left on the request queue may
2767 * never be processed.
2768 */
2769 if (pendingq_count <= 0) {
2770 list_for_each_safe(lptr, tptr, &request_list) {
2771 pq_p = list_entry(lptr, struct work_element, liste);
2772 if (pq_p->requestsent >= timelimit)
2773 break;
2774 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2775 ((struct caller *)pq_p->requestptr)->caller_id[0],
2776 ((struct caller *)pq_p->requestptr)->caller_id[1],
2777 ((struct caller *)pq_p->requestptr)->caller_id[2],
2778 ((struct caller *)pq_p->requestptr)->caller_id[3],
2779 ((struct caller *)pq_p->requestptr)->caller_id[4],
2780 ((struct caller *)pq_p->requestptr)->caller_id[5],
2781 ((struct caller *)pq_p->requestptr)->caller_id[6],
2782 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2783 pq_p->retcode = -ETIMEOUT;
2784 pq_p->status[0] |= STAT_FAILED;
2785 list_del_init(lptr);
2786 requestq_count--;
2787 pq_p->audit[1] |= FP_TIMEDOUT;
2788 pq_p->audit[1] |= FP_REMREQUEST;
2789 pq_p->audit[1] |= FP_AWAKENING;
2790 atomic_set(&pq_p->alarmrung, 1);
2791 wake_up(&pq_p->waitq);
2792 }
2793 }
2794}
2795
2796static void
2797z90crypt_cleanup_task(unsigned long ptr)
2798{
2799 PDEBUG("jiffies %ld\n", jiffies);
2800 spin_lock_irq(&queuespinlock);
2801 if (z90crypt.mask.st_count <= 0) // no devices!
2802 helper_drain_queues();
2803 else
2804 helper_timeout_requests();
2805 spin_unlock_irq(&queuespinlock);
2806 z90crypt_schedule_cleanup_task();
2807}
2808
2809static void
2810z90crypt_schedule_reader_task(unsigned long ptr)
2811{
2812 tasklet_schedule(&reader_tasklet);
2813}
2814
2815/**
2816 * Low-level functions:
2817 *
2818 * create_z90crypt: creates and initializes basic data structures
2819 * refresh_z90crypt: re-initializes basic data structures
2820 * find_crypto_devices: returns a count and mask of hardware status
2821 * create_crypto_device: builds the descriptor for a device
2822 *   destroy_crypto_device: frees the descriptor for a device
2823 *   destroy_z90crypt: drains all work, frees all structures
2824 */
2825
2826/**
2827 * build the z90crypt root structure using the given domain index
2828 */
2829static int
2830create_z90crypt(int *cdx_p)
2831{
2832 struct hdware_block *hdware_blk_p;
2833
2834 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2835 z90crypt.domain_established = 0;
2836 z90crypt.len = sizeof(struct z90crypt);
2837 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2838 z90crypt.cdx = *cdx_p;
2839
2840 hdware_blk_p = (struct hdware_block *)
2841 kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2842 if (!hdware_blk_p) {
2843 PDEBUG("kmalloc for hardware block failed\n");
2844 return ENOMEM;
2845 }
2846 memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2847 z90crypt.hdware_info = hdware_blk_p;
2848
2849 return 0;
2850}
2851
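/**
 * Probe the candidate APs, querying domains 0-15 on each.  The domains
 * found online on the first AP that reports any are recorded in
 * cdx_array and counted; *correct_cdx_found is set if the domain
 * requested via *cdx_p is among them.
 */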
2852static inline int
2853helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2854{
2855 enum hdstat hd_stat;
2856 int q_depth, dev_type;
2857 int indx, chkdom, numdomains;
2858
2859 q_depth = dev_type = numdomains = 0;
2860 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2861 for (indx = 0; indx < z90crypt.max_count; indx++) {
2862 hd_stat = HD_NOT_THERE;
2863 numdomains = 0;
2864 for (chkdom = 0; chkdom <= 15; chkdom++) {
2865 hd_stat = query_online(indx, chkdom, MAX_RESET,
2866 &q_depth, &dev_type);
2867 if (hd_stat == HD_TSQ_EXCEPTION) {
2868 z90crypt.terminating = 1;
2869 PRINTKC("exception taken!\n");
2870 break;
2871 }
2872 if (hd_stat == HD_ONLINE) {
2873 cdx_array[numdomains++] = chkdom;
2874 if (*cdx_p == chkdom) {
2875 *correct_cdx_found = 1;
2876 break;
2877 }
2878 }
2879 }
2880 if ((*correct_cdx_found == 1) || (numdomains != 0))
2881 break;
2882 if (z90crypt.terminating)
2883 break;
2884 }
2885 return numdomains;
2886}
2887
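/**
 * Resolve the cryptographic domain: succeed if the requested domain was
 * found, adopt the single discovered domain when none was specified
 * (*cdx_p == -1), and otherwise report no devices, an incorrect domain,
 * or an ambiguous set of domains.
 */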
2888static inline int
2889probe_crypto_domain(int *cdx_p)
2890{
2891 int cdx_array[16];
2892 char cdx_array_text[53], temp[5];
2893 int correct_cdx_found, numdomains;
2894
2895 correct_cdx_found = 0;
2896 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2897
2898 if (z90crypt.terminating)
2899 return TSQ_FATAL_ERROR;
2900
2901 if (correct_cdx_found)
2902 return 0;
2903
2904 if (numdomains == 0) {
2905 PRINTKW("Unable to find crypto domain: No devices found\n");
2906 return Z90C_NO_DEVICES;
2907 }
2908
2909 if (numdomains == 1) {
2910 if (*cdx_p == -1) {
2911 *cdx_p = cdx_array[0];
2912 return 0;
2913 }
2914 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2915 *cdx_p, cdx_array[0]);
2916 return Z90C_INCORRECT_DOMAIN;
2917 }
2918
2919 numdomains--;
2920 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2921 while (numdomains) {
2922 numdomains--;
2923 sprintf(temp, ", %d", cdx_array[numdomains]);
2924 strcat(cdx_array_text, temp);
2925 }
2926
2927 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2928 *cdx_p, cdx_array_text);
2929 return Z90C_AMBIGUOUS_DOMAIN;
2930}
2931
2932static int
2933refresh_z90crypt(int *cdx_p)
2934{
2935 int i, j, indx, rv;
2936 static struct status local_mask;
2937 struct device *devPtr;
2938 unsigned char oldStat, newStat;
2939 int return_unchanged;
2940
2941 if (z90crypt.len != sizeof(z90crypt))
2942 return ENOTINIT;
2943 if (z90crypt.terminating)
2944 return TSQ_FATAL_ERROR;
2945 rv = 0;
2946 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2947 !z90crypt.domain_established) {
2948 rv = probe_crypto_domain(cdx_p);
2949 if (z90crypt.terminating)
2950 return TSQ_FATAL_ERROR;
2951 if (rv == Z90C_NO_DEVICES)
2952 return 0; // try later
2953 if (rv)
2954 return rv;
2955 z90crypt.cdx = *cdx_p;
2956 z90crypt.domain_established = 1;
2957 }
2958 rv = find_crypto_devices(&local_mask);
2959 if (rv) {
2960 PRINTK("find crypto devices returned %d\n", rv);
2961 return rv;
2962 }
2963 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2964 sizeof(struct status))) {
2965 return_unchanged = 1;
2966 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2967 /**
2968 * Check for disabled cards. If any device is marked
2969 * disabled, destroy it.
2970 */
2971 for (j = 0;
2972 j < z90crypt.hdware_info->type_mask[i].st_count;
2973 j++) {
2974 indx = z90crypt.hdware_info->type_x_addr[i].
2975 device_index[j];
2976 devPtr = z90crypt.device_p[indx];
2977 if (devPtr && devPtr->disabled) {
2978 local_mask.st_mask[indx] = HD_NOT_THERE;
2979 return_unchanged = 0;
2980 }
2981 }
2982 }
2983 if (return_unchanged == 1)
2984 return 0;
2985 }
2986
2987 spin_lock_irq(&queuespinlock);
2988 for (i = 0; i < z90crypt.max_count; i++) {
2989 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2990 newStat = local_mask.st_mask[i];
2991 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2992 destroy_crypto_device(i);
2993 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2994 rv = create_crypto_device(i);
2995 if (rv >= REC_FATAL_ERROR)
2996 return rv;
2997 if (rv != 0) {
2998 local_mask.st_mask[i] = HD_NOT_THERE;
2999 local_mask.st_count--;
3000 }
3001 }
3002 }
3003 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
3004 sizeof(local_mask.st_mask));
3005 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
3006 z90crypt.hdware_info->hdware_mask.disabled_count =
3007 local_mask.disabled_count;
3008 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
3009 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
3010 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
3011 &(z90crypt.hdware_info->type_x_addr[i]));
3012 spin_unlock_irq(&queuespinlock);
3013
3014 return rv;
3015}
3016
3017static int
3018find_crypto_devices(struct status *deviceMask)
3019{
3020 int i, q_depth, dev_type;
3021 enum hdstat hd_stat;
3022
3023 deviceMask->st_count = 0;
3024 deviceMask->disabled_count = 0;
3025 deviceMask->user_disabled_count = 0;
3026
3027 for (i = 0; i < z90crypt.max_count; i++) {
3028 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
3029 &dev_type);
3030 if (hd_stat == HD_TSQ_EXCEPTION) {
3031 z90crypt.terminating = 1;
3032 PRINTKC("Exception during probe for crypto devices\n");
3033 return TSQ_FATAL_ERROR;
3034 }
3035 deviceMask->st_mask[i] = hd_stat;
3036 if (hd_stat == HD_ONLINE) {
3037 PDEBUG("Got an online crypto!: %d\n", i);
3038 PDEBUG("Got a queue depth of %d\n", q_depth);
3039 PDEBUG("Got a device type of %d\n", dev_type);
3040 if (q_depth <= 0)
3041 return TSQ_FATAL_ERROR;
3042 deviceMask->st_count++;
3043 z90crypt.q_depth_array[i] = q_depth;
3044 z90crypt.dev_type_array[i] = dev_type;
3045 }
3046 }
3047
3048 return 0;
3049}
3050
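/**
 * Rebuild an index array from a status mask: record the position of
 * every DEV_ONLINE entry and return how many were found.
 */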
3051static int
3052refresh_index_array(struct status *status_str, struct device_x *index_array)
3053{
3054 int i, count;
3055 enum devstat stat;
3056
3057 i = -1;
3058 count = 0;
3059 do {
3060 stat = status_str->st_mask[++i];
3061 if (stat == DEV_ONLINE)
3062 index_array->device_index[count++] = i;
3063 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
3064
3065 return count;
3066}
3067
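/**
 * Bring the device at `index' into service: allocate its descriptor and
 * response buffer on first use, reset it, probe its type if that is
 * still unknown, and update the global and per-type status masks.
 * Returns 0 on success or a positive driver error code.
 */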
3068static int
3069create_crypto_device(int index)
3070{
3071 int rv, devstat, total_size;
3072 struct device *dev_ptr;
3073 struct status *type_str_p;
3074 int deviceType;
3075
3076 dev_ptr = z90crypt.device_p[index];
3077 if (!dev_ptr) {
3078 total_size = sizeof(struct device) +
3079 z90crypt.q_depth_array[index] * sizeof(int);
3080
3081 dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
3082 if (!dev_ptr) {
3083 PRINTK("kmalloc device %d failed\n", index);
3084 return ENOMEM;
3085 }
3086 memset(dev_ptr, 0, total_size);
3087 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
3088 if (!dev_ptr->dev_resp_p) {
3089 kfree(dev_ptr);
3090 PRINTK("kmalloc device %d rec buffer failed\n", index);
3091 return ENOMEM;
3092 }
3093 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
3094 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
3095 }
3096
3097 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
3098 if (devstat == DEV_RSQ_EXCEPTION) {
3099 PRINTK("exception during reset device %d\n", index);
3100 kfree(dev_ptr->dev_resp_p);
3101 kfree(dev_ptr);
3102 return RSQ_FATAL_ERROR;
3103 }
3104 if (devstat == DEV_ONLINE) {
3105 dev_ptr->dev_self_x = index;
3106 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3107 if (dev_ptr->dev_type == NILDEV) {
3108 rv = probe_device_type(dev_ptr);
3109 if (rv) {
3110 PRINTK("rv = %d from probe_device_type %d\n",
3111 rv, index);
3112 kfree(dev_ptr->dev_resp_p);
3113 kfree(dev_ptr);
3114 return rv;
3115 }
3116 }
3117 if (dev_ptr->dev_type == PCIXCC_UNK) {
3118 rv = probe_PCIXCC_type(dev_ptr);
3119 if (rv) {
3120 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3121 rv, index);
3122 kfree(dev_ptr->dev_resp_p);
3123 kfree(dev_ptr);
3124 return rv;
3125 }
3126 }
3127 deviceType = dev_ptr->dev_type;
3128 z90crypt.dev_type_array[index] = deviceType;
3129 if (deviceType == PCICA)
3130 z90crypt.hdware_info->device_type_array[index] = 1;
3131 else if (deviceType == PCICC)
3132 z90crypt.hdware_info->device_type_array[index] = 2;
3133 else if (deviceType == PCIXCC_MCL2)
3134 z90crypt.hdware_info->device_type_array[index] = 3;
3135 else if (deviceType == PCIXCC_MCL3)
3136 z90crypt.hdware_info->device_type_array[index] = 4;
3137 else if (deviceType == CEX2C)
3138 z90crypt.hdware_info->device_type_array[index] = 5;
3139 else
3140 z90crypt.hdware_info->device_type_array[index] = -1;
3141 }
3142
3143 /**
3144 * 'q_depth' returned by the hardware is one less than
3145 * the actual depth
3146 */
3147 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3148 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3149 dev_ptr->dev_stat = devstat;
3150 dev_ptr->disabled = 0;
3151 z90crypt.device_p[index] = dev_ptr;
3152
3153 if (devstat == DEV_ONLINE) {
3154 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3155 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3156 z90crypt.mask.st_count++;
3157 }
3158 deviceType = dev_ptr->dev_type;
3159 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3160 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3161 type_str_p->st_mask[index] = DEV_ONLINE;
3162 type_str_p->st_count++;
3163 }
3164 }
3165
3166 return 0;
3167}
3168
3169static int
3170destroy_crypto_device(int index)
3171{
3172 struct device *dev_ptr;
3173 int t, disabledFlag;
3174
3175 dev_ptr = z90crypt.device_p[index];
3176
3177 /* remember device type; get rid of device struct */
3178 if (dev_ptr) {
3179 disabledFlag = dev_ptr->disabled;
3180 t = dev_ptr->dev_type;
3181 if (dev_ptr->dev_resp_p)
3182 kfree(dev_ptr->dev_resp_p);
3183 kfree(dev_ptr);
3184 } else {
3185 disabledFlag = 0;
3186 t = -1;
3187 }
3188 z90crypt.device_p[index] = 0;
3189
3190 /* if the type is valid, remove the device from the type_mask */
3191 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3192 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3193 z90crypt.hdware_info->type_mask[t].st_count--;
3194 if (disabledFlag == 1)
3195 z90crypt.hdware_info->type_mask[t].disabled_count--;
3196 }
3197 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3198 z90crypt.mask.st_mask[index] = DEV_GONE;
3199 z90crypt.mask.st_count--;
3200 }
3201 z90crypt.hdware_info->device_type_array[index] = 0;
3202
3203 return 0;
3204}
3205
3206static void
3207destroy_z90crypt(void)
3208{
3209 int i;
3210 for (i = 0; i < z90crypt.max_count; i++)
3211 if (z90crypt.device_p[i])
3212 destroy_crypto_device(i);
3213 if (z90crypt.hdware_info)
3214 kfree((void *)z90crypt.hdware_info);
3215 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3216}
3217
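/**
 * Canned request used by probe_device_type below: a reply beginning with
 * 0x00 0x86 identifies a PCICC; anything else is treated as a PCICA.
 */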
3218static unsigned char static_testmsg[384] = {
32190x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
32200x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
32210x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
32220x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
32230x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32240x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32250x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
32260x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32270xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32280x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32290x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32300x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32310x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
32320x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
32330x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
32340x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
32350x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
32360x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
32370x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
32380x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
32390x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
32400xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
32410x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
32420x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3243};
3244
3245static int
3246probe_device_type(struct device *devPtr)
3247{
3248 int rv, dv, i, index, length;
3249 unsigned char psmid[8];
3250 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3251
3252 index = devPtr->dev_self_x;
3253 rv = 0;
3254 do {
3255 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3256 length = sizeof(static_testmsg) - 24;
3257 /* the -24 allows for the header */
3258 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3259 if (dv) {
3260 PDEBUG("dv returned by send during probe: %d\n", dv);
3261 if (dv == DEV_SEN_EXCEPTION) {
3262 rv = SEN_FATAL_ERROR;
3263 PRINTKC("exception in send to AP %d\n", index);
3264 break;
3265 }
3266 PDEBUG("return value from send_to_AP: %d\n", rv);
3267 switch (dv) {
3268 case DEV_GONE:
3269 PDEBUG("dev %d not available\n", index);
3270 rv = SEN_NOT_AVAIL;
3271 break;
3272 case DEV_ONLINE:
3273 rv = 0;
3274 break;
3275 case DEV_EMPTY:
3276 rv = SEN_NOT_AVAIL;
3277 break;
3278 case DEV_NO_WORK:
3279 rv = SEN_FATAL_ERROR;
3280 break;
3281 case DEV_BAD_MESSAGE:
3282 rv = SEN_USER_ERROR;
3283 break;
3284 case DEV_QUEUE_FULL:
3285 rv = SEN_QUEUE_FULL;
3286 break;
3287 default:
3288 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3289 rv = SEN_NOT_AVAIL;
3290 break;
3291 }
3292 }
3293
3294 if (rv)
3295 break;
3296
3297 for (i = 0; i < 6; i++) {
3298 mdelay(300);
3299 dv = receive_from_AP(index, z90crypt.cdx,
3300 devPtr->dev_resp_l,
3301 devPtr->dev_resp_p, psmid);
3302 PDEBUG("dv returned by DQ = %d\n", dv);
3303 if (dv == DEV_REC_EXCEPTION) {
3304 rv = REC_FATAL_ERROR;
3305 PRINTKC("exception in dequeue %d\n",
3306 index);
3307 break;
3308 }
3309 switch (dv) {
3310 case DEV_ONLINE:
3311 rv = 0;
3312 break;
3313 case DEV_EMPTY:
3314 rv = REC_EMPTY;
3315 break;
3316 case DEV_NO_WORK:
3317 rv = REC_NO_WORK;
3318 break;
3319 case DEV_BAD_MESSAGE:
3320 case DEV_GONE:
3321 default:
3322 rv = REC_NO_RESPONSE;
3323 break;
3324 }
3325 if ((rv != 0) && (rv != REC_NO_WORK))
3326 break;
3327 if (rv == 0)
3328 break;
3329 }
3330 if (rv)
3331 break;
3332 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3333 (devPtr->dev_resp_p[1] == 0x86);
3334 if (rv)
3335 devPtr->dev_type = PCICC;
3336 else
3337 devPtr->dev_type = PCICA;
3338 rv = 0;
3339 } while (0);
3340 /* In a general error case, the card is not marked online */
3341 return rv;
3342}
3343
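/**
 * Canned request used by probe_PCIXCC_type below: a CPRBX reply carrying
 * rtcode 8 and rscode 33 marks the card as MCL2, otherwise it is treated
 * as MCL3.
 */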
3344static unsigned char MCL3_testmsg[] = {
33450x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
33460x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33470x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33480x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33490x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
33500x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
33510x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
33520x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
33530x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33540x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33550x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33560x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33570x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33580x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33590x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33600x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33610x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33620x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33630x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33640x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33650x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
33660x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
33670x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
33680xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
33690x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
33700x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
33710x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
33720x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
33730x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
33740xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
33750xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
33760x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
33770x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
33780xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
33790x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3380};
3381
3382static int
3383probe_PCIXCC_type(struct device *devPtr)
3384{
3385 int rv, dv, i, index, length;
3386 unsigned char psmid[8];
3387 static unsigned char loc_testmsg[548];
3388 struct CPRBX *cprbx_p;
3389
3390 index = devPtr->dev_self_x;
3391 rv = 0;
3392 do {
3393 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3394 length = sizeof(MCL3_testmsg) - 0x0C;
3395 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3396 if (dv) {
3397 PDEBUG("dv returned = %d\n", dv);
3398 if (dv == DEV_SEN_EXCEPTION) {
3399 rv = SEN_FATAL_ERROR;
3400 PRINTKC("exception in send to AP %d\n", index);
3401 break;
3402 }
3403 PDEBUG("return value from send_to_AP: %d\n", rv);
3404 switch (dv) {
3405 case DEV_GONE:
3406 PDEBUG("dev %d not available\n", index);
3407 rv = SEN_NOT_AVAIL;
3408 break;
3409 case DEV_ONLINE:
3410 rv = 0;
3411 break;
3412 case DEV_EMPTY:
3413 rv = SEN_NOT_AVAIL;
3414 break;
3415 case DEV_NO_WORK:
3416 rv = SEN_FATAL_ERROR;
3417 break;
3418 case DEV_BAD_MESSAGE:
3419 rv = SEN_USER_ERROR;
3420 break;
3421 case DEV_QUEUE_FULL:
3422 rv = SEN_QUEUE_FULL;
3423 break;
3424 default:
3425 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3426 rv = SEN_NOT_AVAIL;
3427 break;
3428 }
3429 }
3430
3431 if (rv)
3432 break;
3433
3434 for (i = 0; i < 6; i++) {
3435 mdelay(300);
3436 dv = receive_from_AP(index, z90crypt.cdx,
3437 devPtr->dev_resp_l,
3438 devPtr->dev_resp_p, psmid);
3439 PDEBUG("dv returned by DQ = %d\n", dv);
3440 if (dv == DEV_REC_EXCEPTION) {
3441 rv = REC_FATAL_ERROR;
3442 PRINTKC("exception in dequeue %d\n",
3443 index);
3444 break;
3445 }
3446 switch (dv) {
3447 case DEV_ONLINE:
3448 rv = 0;
3449 break;
3450 case DEV_EMPTY:
3451 rv = REC_EMPTY;
3452 break;
3453 case DEV_NO_WORK:
3454 rv = REC_NO_WORK;
3455 break;
3456 case DEV_BAD_MESSAGE:
3457 case DEV_GONE:
3458 default:
3459 rv = REC_NO_RESPONSE;
3460 break;
3461 }
3462 if ((rv != 0) && (rv != REC_NO_WORK))
3463 break;
3464 if (rv == 0)
3465 break;
3466 }
3467 if (rv)
3468 break;
3469 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3470 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3471 devPtr->dev_type = PCIXCC_MCL2;
3472 PDEBUG("device %d is MCL2\n", index);
3473 } else {
3474 devPtr->dev_type = PCIXCC_MCL3;
3475 PDEBUG("device %d is MCL3\n", index);
3476 }
3477 } while (0);
3478 /* In a general error case, the card is not marked online */
3479 return rv;
3480}
3481
3482#ifdef Z90CRYPT_USE_HOTPLUG
3483static void
3484z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
3485{
3486#ifdef CONFIG_HOTPLUG
3487 char *argv[3];
3488 char *envp[6];
3489 char major[20];
3490 char minor[20];
3491
3492 sprintf(major, "MAJOR=%d", dev_major);
3493 sprintf(minor, "MINOR=%d", dev_minor);
3494
3495 argv[0] = hotplug_path;
3496 argv[1] = "z90crypt";
3497 argv[2] = 0;
3498
3499 envp[0] = "HOME=/";
3500 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3501
3502 switch (action) {
3503 case Z90CRYPT_HOTPLUG_ADD:
3504 envp[2] = "ACTION=add";
3505 break;
3506 case Z90CRYPT_HOTPLUG_REMOVE:
3507 envp[2] = "ACTION=remove";
3508 break;
3509 default:
3510 BUG();
3511 break;
3512 }
3513 envp[3] = major;
3514 envp[4] = minor;
3515 envp[5] = 0;
3516
3517 call_usermodehelper(argv[0], argv, envp, 0);
3518#endif
3519}
3520#endif
3521
3522module_init(z90crypt_init_module);
3523module_exit(z90crypt_cleanup_module);