blob: b2740ff2e615dc4fe525ccb4b26252733081141b [file] [log] [blame]
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001/*
2 * linux/drivers/s390/crypto/zcrypt_api.c
3 *
Ralph Wuerthner54321142006-09-20 15:58:36 +02004 * zcrypt 2.1.0
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02005 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/miscdevice.h>
34#include <linux/fs.h>
35#include <linux/proc_fs.h>
36#include <linux/compat.h>
37#include <asm/atomic.h>
38#include <asm/uaccess.h>
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +020039#include <linux/hw_random.h>
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +020040
41#include "zcrypt_api.h"
42
43/**
44 * Module description.
45 */
46MODULE_AUTHOR("IBM Corporation");
47MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
48 "Copyright 2001, 2006 IBM Corporation");
49MODULE_LICENSE("GPL");
50
/* Protects zcrypt_device_list and zcrypt_device_count below. */
static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
/* Number of currently open z90crypt file handles (see zcrypt_open). */
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);
58
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +020059/**
60 * Device attributes common for all crypto devices.
61 */
62static ssize_t zcrypt_type_show(struct device *dev,
63 struct device_attribute *attr, char *buf)
64{
65 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
66 return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
67}
68
69static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
70
/* Show the device's online flag (0 or 1) via sysfs. */
static ssize_t zcrypt_online_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
}
77
78static ssize_t zcrypt_online_store(struct device *dev,
79 struct device_attribute *attr,
80 const char *buf, size_t count)
81{
82 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
83 int online;
84
85 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
86 return -EINVAL;
87 zdev->online = online;
88 if (!online)
89 ap_flush_queue(zdev->ap_dev);
90 return count;
91}
92
93static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
94
/* Attributes created for every registered zcrypt device. */
static struct attribute * zcrypt_device_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_online.attr,
	NULL,
};

static struct attribute_group zcrypt_device_attr_group = {
	.attrs = zcrypt_device_attrs,
};
104
/**
 * Move the device towards the head of the device list.
 * Need to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	/*
	 * Walk backwards until we find a device whose weighted load
	 * ((request_count + 1) * speed_rating) is not worse than ours.
	 */
	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating <=
		    (zdev->request_count + 1) * zdev->speed_rating &&
		    tmp->speed_rating != 0)
			break;
	}
	if (l == zdev->list.prev)
		return;	/* Already in the right position. */
	/* Move zdev behind l */
	list_del(&zdev->list);
	list_add(&zdev->list, l);
}
130
/**
 * Move the device towards the tail of the device list.
 * Need to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	/*
	 * Walk forwards until we find a device with a worse weighted
	 * load than ours, or hit the speed_rating == 0 tail section.
	 */
	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating >
		    (zdev->request_count + 1) * zdev->speed_rating ||
		    tmp->speed_rating == 0)
			break;
	}
	if (l == zdev->list.next)
		return;	/* Already in the right position. */
	/* Move zdev before l */
	list_del(&zdev->list);
	list_add_tail(&zdev->list, l);
}
156
/* kref release callback: frees the device once the last reference drops. */
static void zcrypt_device_release(struct kref *kref)
{
	struct zcrypt_device *zdev =
		container_of(kref, struct zcrypt_device, refcount);
	zcrypt_device_free(zdev);
}
163
/* Take an additional reference on a zcrypt device. */
void zcrypt_device_get(struct zcrypt_device *zdev)
{
	kref_get(&zdev->refcount);
}
EXPORT_SYMBOL(zcrypt_device_get);
169
/*
 * Drop a reference on a zcrypt device. Returns nonzero if this was
 * the last reference and the device has been released.
 */
int zcrypt_device_put(struct zcrypt_device *zdev)
{
	return kref_put(&zdev->refcount, zcrypt_device_release);
}
EXPORT_SYMBOL(zcrypt_device_put);
175
176struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
177{
178 struct zcrypt_device *zdev;
179
180 zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
181 if (!zdev)
182 return NULL;
183 zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
184 if (!zdev->reply.message)
185 goto out_free;
186 zdev->reply.length = max_response_size;
187 spin_lock_init(&zdev->lock);
188 INIT_LIST_HEAD(&zdev->list);
189 return zdev;
190
191out_free:
192 kfree(zdev);
193 return NULL;
194}
195EXPORT_SYMBOL(zcrypt_device_alloc);
196
/* Free a zcrypt device structure and its reply buffer. */
void zcrypt_device_free(struct zcrypt_device *zdev)
{
	kfree(zdev->reply.message);
	kfree(zdev);
}
EXPORT_SYMBOL(zcrypt_device_free);
203
/**
 * Register a crypto device.
 *
 * Creates the sysfs attribute group, puts the device on the global
 * list (online by default) and, for rng-capable devices, registers
 * the zcrypt hwrng source. Returns 0 on success or a negative errno.
 */
int zcrypt_device_register(struct zcrypt_device *zdev)
{
	int rc;

	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
				&zcrypt_device_attr_group);
	if (rc)
		goto out;
	get_device(&zdev->ap_dev->device);
	kref_init(&zdev->refcount);
	spin_lock_bh(&zcrypt_device_lock);
	zdev->online = 1;	/* New devices are online by default. */
	list_add_tail(&zdev->list, &zcrypt_device_list);
	__zcrypt_increase_preference(zdev);
	zcrypt_device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	if (zdev->ops->rng) {
		rc = zcrypt_rng_device_add();
		if (rc)
			goto out_unregister;
	}
	return 0;

out_unregister:
	/* Undo the registration steps above in reverse order. */
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
out:
	return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);
243
/**
 * Unregister a crypto device.
 *
 * Reverses zcrypt_device_register(): removes the hwrng source if
 * needed, takes the device off the global list, removes the sysfs
 * group and drops the references taken at registration time.
 */
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
	if (zdev->ops->rng)
		zcrypt_rng_device_remove();
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
}
EXPORT_SYMBOL(zcrypt_device_unregister);
261
/**
 * zcrypt_read is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}
270
/**
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}
279
/**
 * Device open/close functions to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return 0;
}

static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}
294
/**
 * zcrypt ioctls.
 */
/*
 * Perform an RSA modular exponentiation on the first suitable device.
 * Returns 0 on success, -EINVAL for bad lengths, -EAGAIN if the
 * driver module could not be pinned, -ENODEV if no device qualifies.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_device *zdev;
	int rc;

	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;
	/**
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		/* Skip devices that are offline, lack the op, or can't
		 * handle this modulus size. */
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo ||
		    zdev->min_mod_size > mex->inputdatalength ||
		    zdev->max_mod_size < mex->inputdatalength)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			/* Drop the list lock while the request runs. */
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo(zdev, mex);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		}
		else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
341
/*
 * Perform an RSA CRT (Chinese Remainder Theorem) operation on the
 * first suitable device. Devices with the short_crt limitation get
 * extra screening of the key parts for large inputs (see below).
 */
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_device *zdev;
	unsigned long long z1, z2, z3;
	int rc, copied;

	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
	/**
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	copied = 0;
 restart:
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo_crt ||
		    zdev->min_mod_size > crt->inputdatalength ||
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
			/**
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
			 */
			if (copied == 0) {
				int len;
				/* Must drop the lock for copy_from_user. */
				spin_unlock_bh(&zcrypt_device_lock);
				/* len is max 256 / 2 - 120 = 8 */
				len = crt->inputdatalength / 2 - 120;
				z1 = z2 = z3 = 0;
				if (copy_from_user(&z1, crt->np_prime, len) ||
				    copy_from_user(&z2, crt->bp_key, len) ||
				    copy_from_user(&z3, crt->u_mult_inv, len))
					return -EFAULT;
				copied = 1;
				/**
				 * We have to restart device lookup -
				 * the device list may have changed by now.
				 */
				goto restart;
			}
			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
				/* The device can't handle this request. */
				continue;
		}
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			/* Drop the list lock while the request runs. */
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		}
		else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
416
Ralph Wuerthner54321142006-09-20 15:58:36 +0200417static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
418{
419 struct zcrypt_device *zdev;
420 int rc;
421
422 spin_lock_bh(&zcrypt_device_lock);
423 list_for_each_entry(zdev, &zcrypt_device_list, list) {
424 if (!zdev->online || !zdev->ops->send_cprb ||
425 (xcRB->user_defined != AUTOSELECT &&
426 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
427 )
428 continue;
429 zcrypt_device_get(zdev);
430 get_device(&zdev->ap_dev->device);
431 zdev->request_count++;
432 __zcrypt_decrease_preference(zdev);
Ralph Wuerthner54321142006-09-20 15:58:36 +0200433 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
Ralph Wuerthner43a867a2007-03-19 13:19:19 +0100434 spin_unlock_bh(&zcrypt_device_lock);
Ralph Wuerthner54321142006-09-20 15:58:36 +0200435 rc = zdev->ops->send_cprb(zdev, xcRB);
Ralph Wuerthner43a867a2007-03-19 13:19:19 +0100436 spin_lock_bh(&zcrypt_device_lock);
Ralph Wuerthner54321142006-09-20 15:58:36 +0200437 module_put(zdev->ap_dev->drv->driver.owner);
438 }
439 else
440 rc = -EAGAIN;
Ralph Wuerthner54321142006-09-20 15:58:36 +0200441 zdev->request_count--;
442 __zcrypt_increase_preference(zdev);
443 put_device(&zdev->ap_dev->device);
444 zcrypt_device_put(zdev);
445 spin_unlock_bh(&zcrypt_device_lock);
446 return rc;
447 }
448 spin_unlock_bh(&zcrypt_device_lock);
449 return -ENODEV;
450}
451
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +0200452static long zcrypt_rng(char *buffer)
453{
454 struct zcrypt_device *zdev;
455 int rc;
456
457 spin_lock_bh(&zcrypt_device_lock);
458 list_for_each_entry(zdev, &zcrypt_device_list, list) {
459 if (!zdev->online || !zdev->ops->rng)
460 continue;
461 zcrypt_device_get(zdev);
462 get_device(&zdev->ap_dev->device);
463 zdev->request_count++;
464 __zcrypt_decrease_preference(zdev);
465 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
466 spin_unlock_bh(&zcrypt_device_lock);
467 rc = zdev->ops->rng(zdev, buffer);
468 spin_lock_bh(&zcrypt_device_lock);
469 module_put(zdev->ap_dev->drv->driver.owner);
470 } else
471 rc = -EAGAIN;
472 zdev->request_count--;
473 __zcrypt_increase_preference(zdev);
474 put_device(&zdev->ap_dev->device);
475 zcrypt_device_put(zdev);
476 spin_unlock_bh(&zcrypt_device_lock);
477 return rc;
478 }
479 spin_unlock_bh(&zcrypt_device_lock);
480 return -ENODEV;
481}
482
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200483static void zcrypt_status_mask(char status[AP_DEVICES])
484{
485 struct zcrypt_device *zdev;
486
487 memset(status, 0, sizeof(char) * AP_DEVICES);
488 spin_lock_bh(&zcrypt_device_lock);
489 list_for_each_entry(zdev, &zcrypt_device_list, list)
490 status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
491 zdev->online ? zdev->user_space_type : 0x0d;
492 spin_unlock_bh(&zcrypt_device_lock);
493}
494
495static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
496{
497 struct zcrypt_device *zdev;
498
499 memset(qdepth, 0, sizeof(char) * AP_DEVICES);
500 spin_lock_bh(&zcrypt_device_lock);
501 list_for_each_entry(zdev, &zcrypt_device_list, list) {
502 spin_lock(&zdev->ap_dev->lock);
503 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
504 zdev->ap_dev->pendingq_count +
505 zdev->ap_dev->requestq_count;
506 spin_unlock(&zdev->ap_dev->lock);
507 }
508 spin_unlock_bh(&zcrypt_device_lock);
509}
510
511static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
512{
513 struct zcrypt_device *zdev;
514
515 memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
516 spin_lock_bh(&zcrypt_device_lock);
517 list_for_each_entry(zdev, &zcrypt_device_list, list) {
518 spin_lock(&zdev->ap_dev->lock);
519 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
520 zdev->ap_dev->total_request_count;
521 spin_unlock(&zdev->ap_dev->lock);
522 }
523 spin_unlock_bh(&zcrypt_device_lock);
524}
525
526static int zcrypt_pendingq_count(void)
527{
528 struct zcrypt_device *zdev;
529 int pendingq_count = 0;
530
531 spin_lock_bh(&zcrypt_device_lock);
532 list_for_each_entry(zdev, &zcrypt_device_list, list) {
533 spin_lock(&zdev->ap_dev->lock);
534 pendingq_count += zdev->ap_dev->pendingq_count;
535 spin_unlock(&zdev->ap_dev->lock);
536 }
537 spin_unlock_bh(&zcrypt_device_lock);
538 return pendingq_count;
539}
540
541static int zcrypt_requestq_count(void)
542{
543 struct zcrypt_device *zdev;
544 int requestq_count = 0;
545
546 spin_lock_bh(&zcrypt_device_lock);
547 list_for_each_entry(zdev, &zcrypt_device_list, list) {
548 spin_lock(&zdev->ap_dev->lock);
549 requestq_count += zdev->ap_dev->requestq_count;
550 spin_unlock(&zdev->ap_dev->lock);
551 }
552 spin_unlock_bh(&zcrypt_device_lock);
553 return requestq_count;
554}
555
556static int zcrypt_count_type(int type)
557{
558 struct zcrypt_device *zdev;
559 int device_count = 0;
560
561 spin_lock_bh(&zcrypt_device_lock);
562 list_for_each_entry(zdev, &zcrypt_device_list, list)
563 if (zdev->user_space_type == type)
564 device_count++;
565 spin_unlock_bh(&zcrypt_device_lock);
566 return device_count;
567}
568
/**
 * Old, deprecated combi status call.
 *
 * Collects the legacy ica_z90_status snapshot and copies it to the
 * user buffer at @arg. Returns 0 on success, -ENOMEM or -EFAULT.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}
595
/*
 * Main ioctl dispatcher for the z90crypt misc device. Crypto requests
 * that fail with -EAGAIN (driver module busy) are retried until a
 * device accepts them or a different error occurs.
 */
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;
		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;
		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;
		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* The xcRB carries reply lengths/status back to user space. */
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		char status[AP_DEVICES];
		zcrypt_status_mask(status);
		if (copy_to_user((char __user *) arg, status,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];
		zcrypt_qdepth_mask(qdepth);
		if (copy_to_user((char __user *) arg, qdepth,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		int reqcnt[AP_DEVICES];
		zcrypt_perdev_reqcnt(reqcnt);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/**
	 * Deprecated ioctls. Don't add another device count ioctl,
	 * you can count them yourself in the user space with the
	 * output of the Z90STAT_STATUS_MASK ioctl.
	 */
	case ICAZ90STATUS:
		return zcrypt_ica_status(filp, arg);
	case Z90STAT_TOTALCOUNT:
		return put_user(zcrypt_device_count, (int __user *) arg);
	case Z90STAT_PCICACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
				(int __user *) arg);
	case Z90STAT_PCICCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL2COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL3COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_PCIXCCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_CEX2CCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
				(int __user *) arg);
	case Z90STAT_CEX2ACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
				(int __user *) arg);
	default:
		/* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
707
#ifdef CONFIG_COMPAT
/**
 * ioctl32 conversion routines
 */
/* 32-bit layout of struct ica_rsa_modexpo: pointers are compat_uptr_t. */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};
720
/*
 * Convert a 32-bit ICARSAMODEXPO request to the native layout,
 * run it, and copy the output length back to the 32-bit struct.
 */
static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	if (!rc)
		rc = put_user(mex64.outputdatalength,
			      &umex32->outputdatalength);
	return rc;
}
745
/* 32-bit layout of struct ica_rsa_modexpo_crt. */
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};
757
/*
 * Convert a 32-bit ICARSACRT request to the native layout, run it,
 * and copy the output length back to the 32-bit struct.
 */
static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata= compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	if (!rc)
		rc = put_user(crt64.outputdatalength,
			      &ucrt32->outputdatalength);
	return rc;
}
785
Ralph Wuerthner54321142006-09-20 15:58:36 +0200786struct compat_ica_xcRB {
787 unsigned short agent_ID;
788 unsigned int user_defined;
789 unsigned short request_ID;
790 unsigned int request_control_blk_length;
791 unsigned char padding1[16 - sizeof (compat_uptr_t)];
792 compat_uptr_t request_control_blk_addr;
793 unsigned int request_data_length;
794 char padding2[16 - sizeof (compat_uptr_t)];
795 compat_uptr_t request_data_address;
796 unsigned int reply_control_blk_length;
797 char padding3[16 - sizeof (compat_uptr_t)];
798 compat_uptr_t reply_control_blk_addr;
799 unsigned int reply_data_length;
800 char padding4[16 - sizeof (compat_uptr_t)];
801 compat_uptr_t reply_data_addr;
802 unsigned short priority_window;
803 unsigned int status;
804} __attribute__((packed));
805
/*
 * Convert a 32-bit ZSECSENDCPRB request to the native layout, run it,
 * and copy the reply lengths and status back to the 32-bit struct.
 */
static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* Only the output fields are copied back. */
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}
846
Heiko Carstens2b67fc42007-02-05 21:16:47 +0100847static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200848 unsigned long arg)
849{
850 if (cmd == ICARSAMODEXPO)
851 return trans_modexpo32(filp, cmd, arg);
852 if (cmd == ICARSACRT)
853 return trans_modexpo_crt32(filp, cmd, arg);
Ralph Wuerthner54321142006-09-20 15:58:36 +0200854 if (cmd == ZSECSENDCPRB)
855 return trans_xcRB32(filp, cmd, arg);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200856 return zcrypt_unlocked_ioctl(filp, cmd, arg);
857}
858#endif
859
/**
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,	/* always -EPERM */
	.write		= zcrypt_write,	/* always -EPERM */
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release
};
874
/**
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	    = MISC_DYNAMIC_MINOR,
	.name	    = "z90crypt",
	.fops	    = &zcrypt_fops,
};
883
/**
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;
888
Heiko Carstens4d284ca2007-02-05 21:18:53 +0100889static int sprintcl(unsigned char *outaddr, unsigned char *addr,
890 unsigned int len)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200891{
892 int hl, i;
893
894 hl = 0;
895 for (i = 0; i < len; i++)
896 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
897 hl += sprintf(outaddr+hl, " ");
898 return hl;
899}
900
Heiko Carstens4d284ca2007-02-05 21:18:53 +0100901static int sprintrw(unsigned char *outaddr, unsigned char *addr,
902 unsigned int len)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200903{
904 int hl, inl, c, cx;
905
906 hl = sprintf(outaddr, " ");
907 inl = 0;
908 for (c = 0; c < (len / 16); c++) {
909 hl += sprintcl(outaddr+hl, addr+inl, 16);
910 inl += 16;
911 }
912 cx = len%16;
913 if (cx) {
914 hl += sprintcl(outaddr+hl, addr+inl, cx);
915 inl += cx;
916 }
917 hl += sprintf(outaddr+hl, "\n");
918 return hl;
919}
920
Heiko Carstens4d284ca2007-02-05 21:18:53 +0100921static int sprinthx(unsigned char *title, unsigned char *outaddr,
922 unsigned char *addr, unsigned int len)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200923{
924 int hl, inl, r, rx;
925
926 hl = sprintf(outaddr, "\n%s\n", title);
927 inl = 0;
928 for (r = 0; r < (len / 64); r++) {
929 hl += sprintrw(outaddr+hl, addr+inl, 64);
930 inl += 64;
931 }
932 rx = len % 64;
933 if (rx) {
934 hl += sprintrw(outaddr+hl, addr+inl, rx);
935 inl += rx;
936 }
937 hl += sprintf(outaddr+hl, "\n");
938 return hl;
939}
940
Heiko Carstens4d284ca2007-02-05 21:18:53 +0100941static int sprinthx4(unsigned char *title, unsigned char *outaddr,
942 unsigned int *array, unsigned int len)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +0200943{
944 int hl, r;
945
946 hl = sprintf(outaddr, "\n%s\n", title);
947 for (r = 0; r < len; r++) {
948 if ((r % 8) == 0)
949 hl += sprintf(outaddr+hl, " ");
950 hl += sprintf(outaddr+hl, "%08X ", array[r]);
951 if ((r % 8) == 7)
952 hl += sprintf(outaddr+hl, "\n");
953 }
954 hl += sprintf(outaddr+hl, "\n");
955 return hl;
956}
957
/*
 * /proc read handler: render the full zcrypt status report (version,
 * domain, per-type counts, queue counts and per-device tables) into
 * the provided page buffer. Returns the number of bytes written.
 */
static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
			      int count, int *eof, void *data)
{
	unsigned char *workarea;
	int len;

	len = 0;

	/* resp_buff is a page. Use the right half for a work area */
	workarea = resp_buff + 2000;
	len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
		       ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
	len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
		       ap_domain_index);
	len += sprintf(resp_buff + len, "Total device count: %d\n",
		       zcrypt_device_count);
	len += sprintf(resp_buff + len, "PCICA count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCICA));
	len += sprintf(resp_buff + len, "PCICC count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCICC));
	len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
	len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
	len += sprintf(resp_buff + len, "CEX2C count: %d\n",
		       zcrypt_count_type(ZCRYPT_CEX2C));
	len += sprintf(resp_buff + len, "CEX2A count: %d\n",
		       zcrypt_count_type(ZCRYPT_CEX2A));
	len += sprintf(resp_buff + len, "requestq count: %d\n",
		       zcrypt_requestq_count());
	len += sprintf(resp_buff + len, "pendingq count: %d\n",
		       zcrypt_pendingq_count());
	len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
		       atomic_read(&zcrypt_open_count));
	zcrypt_status_mask(workarea);
	len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
			"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
			resp_buff+len, workarea, AP_DEVICES);
	zcrypt_qdepth_mask(workarea);
	len += sprinthx("Waiting work element counts",
			resp_buff+len, workarea, AP_DEVICES);
	zcrypt_perdev_reqcnt((int *) workarea);
	len += sprinthx4("Per-device successfully completed request counts",
			 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
	*eof = 1;
	/* Wipe the scratch area before handing the page back. */
	memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
	return len;
}
1006
1007static void zcrypt_disable_card(int index)
1008{
1009 struct zcrypt_device *zdev;
1010
1011 spin_lock_bh(&zcrypt_device_lock);
1012 list_for_each_entry(zdev, &zcrypt_device_list, list)
1013 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
1014 zdev->online = 0;
1015 ap_flush_queue(zdev->ap_dev);
1016 break;
1017 }
1018 spin_unlock_bh(&zcrypt_device_lock);
1019}
1020
1021static void zcrypt_enable_card(int index)
1022{
1023 struct zcrypt_device *zdev;
1024
1025 spin_lock_bh(&zcrypt_device_lock);
1026 list_for_each_entry(zdev, &zcrypt_device_list, list)
1027 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
1028 zdev->online = 1;
1029 break;
1030 }
1031 spin_unlock_bh(&zcrypt_device_lock);
1032}
1033
1034static int zcrypt_status_write(struct file *file, const char __user *buffer,
1035 unsigned long count, void *data)
1036{
1037 unsigned char *lbuf, *ptr;
1038 unsigned long local_count;
1039 int j;
1040
1041 if (count <= 0)
1042 return 0;
1043
1044#define LBUFSIZE 1200UL
1045 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
1046 if (!lbuf) {
1047 PRINTK("kmalloc failed!\n");
1048 return 0;
1049 }
1050
1051 local_count = min(LBUFSIZE - 1, count);
1052 if (copy_from_user(lbuf, buffer, local_count) != 0) {
1053 kfree(lbuf);
1054 return -EFAULT;
1055 }
1056 lbuf[local_count] = '\0';
1057
1058 ptr = strstr(lbuf, "Online devices");
1059 if (!ptr) {
1060 PRINTK("Unable to parse data (missing \"Online devices\")\n");
1061 goto out;
1062 }
1063 ptr = strstr(ptr, "\n");
1064 if (!ptr) {
1065 PRINTK("Unable to parse data (missing newline "
1066 "after \"Online devices\")\n");
1067 goto out;
1068 }
1069 ptr++;
1070
1071 if (strstr(ptr, "Waiting work element counts") == NULL) {
1072 PRINTK("Unable to parse data (missing "
1073 "\"Waiting work element counts\")\n");
1074 goto out;
1075 }
1076
1077 for (j = 0; j < 64 && *ptr; ptr++) {
1078 /**
1079 * '0' for no device, '1' for PCICA, '2' for PCICC,
1080 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1081 * '5' for CEX2C and '6' for CEX2A'
1082 */
1083 if (*ptr >= '0' && *ptr <= '6')
1084 j++;
1085 else if (*ptr == 'd' || *ptr == 'D')
1086 zcrypt_disable_card(j++);
1087 else if (*ptr == 'e' || *ptr == 'E')
1088 zcrypt_enable_card(j++);
1089 else if (*ptr != ' ' && *ptr != '\t')
1090 break;
1091 }
1092out:
1093 kfree(lbuf);
1094 return count;
1095}
1096
/*
 * State of the zcrypt hwrng backend.  The registration refcount is
 * protected by zcrypt_rng_mutex; the buffer index needs no lock because
 * the hwrng core serializes data_read calls (see zcrypt_rng_data_read).
 */
static int zcrypt_rng_device_count;	/* how many users registered the rng */
static u32 *zcrypt_rng_buffer;		/* one zeroed page of random words */
static int zcrypt_rng_buffer_index;	/* words still available in buffer */
static DEFINE_MUTEX(zcrypt_rng_mutex);
1101
1102static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1103{
1104 int rc;
1105
1106 /**
1107 * We don't need locking here because the RNG API guarantees serialized
1108 * read method calls.
1109 */
1110 if (zcrypt_rng_buffer_index == 0) {
1111 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1112 if (rc < 0)
1113 return -EIO;
1114 zcrypt_rng_buffer_index = rc / sizeof *data;
1115 }
1116 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1117 return sizeof *data;
1118}
1119
/* hwrng backend descriptor; random words are supplied by
 * zcrypt_rng_data_read() above. */
static struct hwrng zcrypt_rng_dev = {
	.name = "zcrypt",
	.data_read = zcrypt_rng_data_read,
};
1124
1125static int zcrypt_rng_device_add(void)
1126{
1127 int rc = 0;
1128
1129 mutex_lock(&zcrypt_rng_mutex);
1130 if (zcrypt_rng_device_count == 0) {
1131 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
1132 if (!zcrypt_rng_buffer) {
1133 rc = -ENOMEM;
1134 goto out;
1135 }
1136 zcrypt_rng_buffer_index = 0;
1137 rc = hwrng_register(&zcrypt_rng_dev);
1138 if (rc)
1139 goto out_free;
1140 zcrypt_rng_device_count = 1;
1141 } else
1142 zcrypt_rng_device_count++;
1143 mutex_unlock(&zcrypt_rng_mutex);
1144 return 0;
1145
1146out_free:
1147 free_page((unsigned long) zcrypt_rng_buffer);
1148out:
1149 mutex_unlock(&zcrypt_rng_mutex);
1150 return rc;
1151}
1152
1153static void zcrypt_rng_device_remove(void)
1154{
1155 mutex_lock(&zcrypt_rng_mutex);
1156 zcrypt_rng_device_count--;
1157 if (zcrypt_rng_device_count == 0) {
1158 hwrng_unregister(&zcrypt_rng_dev);
1159 free_page((unsigned long) zcrypt_rng_buffer);
1160 }
1161 mutex_unlock(&zcrypt_rng_mutex);
1162}
1163
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001164/**
1165 * The module initialization code.
1166 */
1167int __init zcrypt_api_init(void)
1168{
1169 int rc;
1170
1171 /* Register the request sprayer. */
1172 rc = misc_register(&zcrypt_misc_device);
1173 if (rc < 0) {
1174 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
1175 zcrypt_misc_device.minor, rc);
1176 goto out;
1177 }
1178
1179 /* Set up the proc file system */
1180 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
1181 if (!zcrypt_entry) {
1182 PRINTK("Couldn't create z90crypt proc entry\n");
1183 rc = -ENOMEM;
1184 goto out_misc;
1185 }
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001186 zcrypt_entry->data = NULL;
1187 zcrypt_entry->read_proc = zcrypt_status_read;
1188 zcrypt_entry->write_proc = zcrypt_status_write;
1189
1190 return 0;
1191
1192out_misc:
1193 misc_deregister(&zcrypt_misc_device);
1194out:
1195 return rc;
1196}
1197
/*
 * zcrypt_api_exit() - module termination.
 *
 * Removes the proc entry first so no new status reads/writes can start,
 * then unregisters the misc device.
 */
void zcrypt_api_exit(void)
{
	remove_proc_entry("driver/z90crypt", NULL);
	misc_deregister(&zcrypt_misc_device);
}
1206
#ifndef CONFIG_ZCRYPT_MONOLITHIC
/* Standalone (non-monolithic) build: wire up the module entry points. */
module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);
#endif