1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2 *
3 * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/cpumask.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15#include <linux/crypto.h>
16#include <crypto/md5.h>
17#include <crypto/sha.h>
18#include <crypto/aes.h>
19#include <crypto/des.h>
20#include <linux/mutex.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23
24#include <crypto/internal/hash.h>
25#include <crypto/scatterwalk.h>
26#include <crypto/algapi.h>
27
28#include <asm/hypervisor.h>
29#include <asm/mdesc.h>
30
31#include "n2_core.h"
32
33#define DRV_MODULE_NAME "n2_crypto"
34#define DRV_MODULE_VERSION "0.1"
35#define DRV_MODULE_RELDATE "April 29, 2010"
36
37static char version[] __devinitdata =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41MODULE_DESCRIPTION("Niagara2 Crypto driver");
42MODULE_LICENSE("GPL");
43MODULE_VERSION(DRV_MODULE_VERSION);
44
45#define N2_CRA_PRIORITY 300
46
47static DEFINE_MUTEX(spu_lock);
48
49struct spu_queue {
50 cpumask_t sharing;
51 unsigned long qhandle;
52
53 spinlock_t lock;
54 u8 q_type;
55 void *q;
56 unsigned long head;
57 unsigned long tail;
58 struct list_head jobs;
59
60 unsigned long devino;
61
62 char irq_name[32];
63 unsigned int irq;
64
65 struct list_head list;
66};
67
68static struct spu_queue **cpu_to_cwq;
69static struct spu_queue **cpu_to_mau;
70
71static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72{
73 if (q->q_type == HV_NCS_QTYPE_MAU) {
74 off += MAU_ENTRY_SIZE;
75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76 off = 0;
77 } else {
78 off += CWQ_ENTRY_SIZE;
79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80 off = 0;
81 }
82 return off;
83}
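/* Both queue types are fixed-size rings, so the offset simply advances
 * by one entry and wraps to zero at the end of the ring.  Purely
 * illustrative arithmetic (the real constants live in n2_core.h): with
 * 64-byte entries and 64 slots the ring spans 0x1000 bytes, so offset
 * 0xfc0 advances to 0x1000 and wraps back to 0.
 */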
84
85struct n2_request_common {
86 struct list_head entry;
87 unsigned int offset;
88};
89#define OFFSET_NOT_RUNNING (~(unsigned int)0)
90
 91/* An async job request records the final tail value it used in
 92 * n2_request_common->offset; we test whether that offset falls in
 93 * the range (old_head, new_head], handling queue wrap-around.
 94 */
95static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96 unsigned long old_head, unsigned long new_head)
97{
98 if (old_head <= new_head) {
99 if (offset > old_head && offset <= new_head)
100 return true;
101 } else {
102 if (offset > old_head || offset <= new_head)
103 return true;
104 }
105 return false;
106}
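/* Worked example with hypothetical offsets: if the queue has wrapped so
 * that old_head = 0xf00 and new_head = 0x080, a job recorded at offset
 * 0xf40 or 0x040 is reported finished (it lies after the old head or at
 * or before the new head), while one recorded at 0x0c0 is not.
 */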
107
108/* When the HEAD marker is unequal to the actual HEAD, we get
109 * a virtual device INO interrupt. We should process the
110 * completed CWQ entries and adjust the HEAD marker to clear
111 * the IRQ.
112 */
113static irqreturn_t cwq_intr(int irq, void *dev_id)
114{
115 unsigned long off, new_head, hv_ret;
116 struct spu_queue *q = dev_id;
117
118 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119 smp_processor_id(), q->qhandle);
120
121 spin_lock(&q->lock);
122
123 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
124
125 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126 smp_processor_id(), new_head, hv_ret);
127
128 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129 /* XXX ... XXX */
130 }
131
132 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133 if (hv_ret == HV_EOK)
134 q->head = new_head;
135
136 spin_unlock(&q->lock);
137
138 return IRQ_HANDLED;
139}
140
141static irqreturn_t mau_intr(int irq, void *dev_id)
142{
143 struct spu_queue *q = dev_id;
144 unsigned long head, hv_ret;
145
146 spin_lock(&q->lock);
147
148 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149 smp_processor_id(), q->qhandle);
150
151 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
152
153 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154 smp_processor_id(), head, hv_ret);
155
156 sun4v_ncs_sethead_marker(q->qhandle, head);
157
158 spin_unlock(&q->lock);
159
160 return IRQ_HANDLED;
161}
162
163static void *spu_queue_next(struct spu_queue *q, void *cur)
164{
165 return q->q + spu_next_offset(q, cur - q->q);
166}
167
168static int spu_queue_num_free(struct spu_queue *q)
169{
170 unsigned long head = q->head;
171 unsigned long tail = q->tail;
172 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173 unsigned long diff;
174
175 if (head > tail)
176 diff = head - tail;
177 else
178 diff = (end - tail) + head;
179
180 return (diff / CWQ_ENTRY_SIZE) - 1;
181}
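/* The "- 1" above keeps one ring entry permanently unused so that a full
 * queue (tail one entry behind head) can be distinguished from an empty
 * one (head == tail).
 */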
182
183static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184{
185 int avail = spu_queue_num_free(q);
186
187 if (avail >= num_entries)
188 return q->q + q->tail;
189
190 return NULL;
191}
192
193static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
194{
195 unsigned long hv_ret, new_tail;
196
197 new_tail = spu_next_offset(q, last - q->q);
198
199 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200 if (hv_ret == HV_EOK)
201 q->tail = new_tail;
202 return hv_ret;
203}
204
205static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206 int enc_type, int auth_type,
207 unsigned int hash_len,
208 bool sfas, bool sob, bool eob, bool encrypt,
209 int opcode)
210{
211 u64 word = (len - 1) & CONTROL_LEN;
212
213 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216 if (sfas)
217 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218 if (sob)
219 word |= CONTROL_START_OF_BLOCK;
220 if (eob)
221 word |= CONTROL_END_OF_BLOCK;
222 if (encrypt)
223 word |= CONTROL_ENCRYPT;
224 if (hmac_key_len)
225 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226 if (hash_len)
227 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228
229 return word;
230}
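/* For illustration, the first hash descriptor built by
 * n2_hash_async_digest() below is created with
 *
 *	control_word_base(nbytes, 0, 0, auth_type, digest_size,
 *			  false, true, false, false,
 *			  OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. an in-place, authentication-only, start-of-block descriptor whose
 * length field encodes nbytes - 1 and whose hash length field encodes
 * digest_size - 1.
 */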
231
232#if 0
233static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
234{
235 if (this_len >= 64 ||
236 qp->head != qp->tail)
237 return true;
238 return false;
239}
240#endif
241
242struct n2_hash_ctx {
243	struct crypto_ahash		*fallback_tfm;
244};
245
246struct n2_hash_req_ctx {
247	union {
248 struct md5_state md5;
249 struct sha1_state sha1;
250 struct sha256_state sha256;
251 } u;
252
253 unsigned char hash_key[64];
254 unsigned char keyed_zero_hash[32];
255
256	struct ahash_request		fallback_req;
257};
258
259static int n2_hash_async_init(struct ahash_request *req)
260{
261	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
262	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
263 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
264
265	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
266	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
267
268	return crypto_ahash_init(&rctx->fallback_req);
269}
270
271static int n2_hash_async_update(struct ahash_request *req)
272{
273	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
274	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
275 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
276
277	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
278 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
279 rctx->fallback_req.nbytes = req->nbytes;
280 rctx->fallback_req.src = req->src;
281
282	return crypto_ahash_update(&rctx->fallback_req);
283}
284
285static int n2_hash_async_final(struct ahash_request *req)
286{
287	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
288	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
289 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
290
291	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
292 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
293 rctx->fallback_req.result = req->result;
294
295	return crypto_ahash_final(&rctx->fallback_req);
296}
297
298static int n2_hash_async_finup(struct ahash_request *req)
299{
300	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
301	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
302 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
303
304	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
305 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
306 rctx->fallback_req.nbytes = req->nbytes;
307 rctx->fallback_req.src = req->src;
308 rctx->fallback_req.result = req->result;
309
310	return crypto_ahash_finup(&rctx->fallback_req);
311}
312
313static int n2_hash_cra_init(struct crypto_tfm *tfm)
314{
315 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
316 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
317 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
318 struct crypto_ahash *fallback_tfm;
319 int err;
320
321 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
322 CRYPTO_ALG_NEED_FALLBACK);
323 if (IS_ERR(fallback_tfm)) {
324 pr_warning("Fallback driver '%s' could not be loaded!\n",
325 fallback_driver_name);
326 err = PTR_ERR(fallback_tfm);
327 goto out;
328 }
329
330	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
331 crypto_ahash_reqsize(fallback_tfm)));
332
333 ctx->fallback_tfm = fallback_tfm;
334	return 0;
335
336out:
337 return err;
338}
339
340static void n2_hash_cra_exit(struct crypto_tfm *tfm)
341{
342 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
343 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
344
345	crypto_free_ahash(ctx->fallback_tfm);
346}
347
348static unsigned long wait_for_tail(struct spu_queue *qp)
349{
350 unsigned long head, hv_ret;
351
352 do {
353 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
354 if (hv_ret != HV_EOK) {
355 pr_err("Hypervisor error on gethead\n");
356 break;
357 }
358 if (head == qp->tail) {
359 qp->head = head;
360 break;
361 }
362 } while (1);
363 return hv_ret;
364}
365
366static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
367 struct cwq_initial_entry *ent)
368{
369 unsigned long hv_ret = spu_queue_submit(qp, ent);
370
371 if (hv_ret == HV_EOK)
372 hv_ret = wait_for_tail(qp);
373
374 return hv_ret;
375}
376
377static int n2_hash_async_digest(struct ahash_request *req,
378 unsigned int auth_type, unsigned int digest_size,
379 unsigned int result_size, void *hash_loc)
380{
381 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
382	struct cwq_initial_entry *ent;
383 struct crypto_hash_walk walk;
384 struct spu_queue *qp;
385 unsigned long flags;
386 int err = -ENODEV;
387 int nbytes, cpu;
388
389 /* The total effective length of the operation may not
390 * exceed 2^16.
391 */
392 if (unlikely(req->nbytes > (1 << 16))) {
393		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
394		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
395
396		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
397 rctx->fallback_req.base.flags =
398 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
399 rctx->fallback_req.nbytes = req->nbytes;
400 rctx->fallback_req.src = req->src;
401 rctx->fallback_req.result = req->result;
402
403 return crypto_ahash_digest(&rctx->fallback_req);
404	}
405
406	nbytes = crypto_hash_walk_first(req, &walk);
407
408 cpu = get_cpu();
409 qp = cpu_to_cwq[cpu];
410 if (!qp)
411 goto out;
412
413 spin_lock_irqsave(&qp->lock, flags);
414
415 /* XXX can do better, improve this later by doing a by-hand scatterlist
416 * XXX walk, etc.
417 */
418 ent = qp->q + qp->tail;
419
420 ent->control = control_word_base(nbytes, 0, 0,
421 auth_type, digest_size,
422 false, true, false, false,
423 OPCODE_INPLACE_BIT |
424 OPCODE_AUTH_MAC);
425 ent->src_addr = __pa(walk.data);
426 ent->auth_key_addr = 0UL;
427 ent->auth_iv_addr = __pa(hash_loc);
428 ent->final_auth_state_addr = 0UL;
429 ent->enc_key_addr = 0UL;
430 ent->enc_iv_addr = 0UL;
431 ent->dest_addr = __pa(hash_loc);
432
433 nbytes = crypto_hash_walk_done(&walk, 0);
434 while (nbytes > 0) {
435 ent = spu_queue_next(qp, ent);
436
437 ent->control = (nbytes - 1);
438 ent->src_addr = __pa(walk.data);
439 ent->auth_key_addr = 0UL;
440 ent->auth_iv_addr = 0UL;
441 ent->final_auth_state_addr = 0UL;
442 ent->enc_key_addr = 0UL;
443 ent->enc_iv_addr = 0UL;
444 ent->dest_addr = 0UL;
445
446 nbytes = crypto_hash_walk_done(&walk, 0);
447 }
448 ent->control |= CONTROL_END_OF_BLOCK;
449
450 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
451 err = -EINVAL;
452 else
453 err = 0;
454
455 spin_unlock_irqrestore(&qp->lock, flags);
456
457 if (!err)
458 memcpy(req->result, hash_loc, result_size);
459out:
460 put_cpu();
461
462 return err;
463}
464
465static int n2_md5_async_digest(struct ahash_request *req)
466{
467	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
468	struct md5_state *m = &rctx->u.md5;
469
470 if (unlikely(req->nbytes == 0)) {
471 static const char md5_zero[MD5_DIGEST_SIZE] = {
472 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
473 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
474 };
475
476 memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
477 return 0;
478 }
479 m->hash[0] = cpu_to_le32(0x67452301);
480 m->hash[1] = cpu_to_le32(0xefcdab89);
481 m->hash[2] = cpu_to_le32(0x98badcfe);
482 m->hash[3] = cpu_to_le32(0x10325476);
483
484 return n2_hash_async_digest(req, AUTH_TYPE_MD5,
485 MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
486 m->hash);
487}
488
489static int n2_sha1_async_digest(struct ahash_request *req)
490{
491	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
492	struct sha1_state *s = &rctx->u.sha1;
493
494 if (unlikely(req->nbytes == 0)) {
495 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
496 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
497 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
498 0x07, 0x09
499 };
500
501 memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
502 return 0;
503 }
504 s->state[0] = SHA1_H0;
505 s->state[1] = SHA1_H1;
506 s->state[2] = SHA1_H2;
507 s->state[3] = SHA1_H3;
508 s->state[4] = SHA1_H4;
509
510 return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
511 SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
512 s->state);
513}
514
515static int n2_sha256_async_digest(struct ahash_request *req)
516{
517	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
518	struct sha256_state *s = &rctx->u.sha256;
519
520 if (req->nbytes == 0) {
521 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
522 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
523 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
524 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
525 0x1b, 0x78, 0x52, 0xb8, 0x55
526 };
527
528 memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
529 return 0;
530 }
531 s->state[0] = SHA256_H0;
532 s->state[1] = SHA256_H1;
533 s->state[2] = SHA256_H2;
534 s->state[3] = SHA256_H3;
535 s->state[4] = SHA256_H4;
536 s->state[5] = SHA256_H5;
537 s->state[6] = SHA256_H6;
538 s->state[7] = SHA256_H7;
539
540 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
541 SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
542 s->state);
543}
544
545static int n2_sha224_async_digest(struct ahash_request *req)
546{
547	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
548	struct sha256_state *s = &rctx->u.sha256;
549
550 if (req->nbytes == 0) {
551 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
552 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
553 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
554 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
555 0x2f
556 };
557
558 memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
559 return 0;
560 }
561 s->state[0] = SHA224_H0;
562 s->state[1] = SHA224_H1;
563 s->state[2] = SHA224_H2;
564 s->state[3] = SHA224_H3;
565 s->state[4] = SHA224_H4;
566 s->state[5] = SHA224_H5;
567 s->state[6] = SHA224_H6;
568 s->state[7] = SHA224_H7;
569
570 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
571 SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
572 s->state);
573}
574
575struct n2_cipher_context {
576 int key_len;
577 int enc_type;
578 union {
579 u8 aes[AES_MAX_KEY_SIZE];
580 u8 des[DES_KEY_SIZE];
581 u8 des3[3 * DES_KEY_SIZE];
582 u8 arc4[258]; /* S-box, X, Y */
583 } key;
584};
585
586#define N2_CHUNK_ARR_LEN 16
587
588struct n2_crypto_chunk {
589 struct list_head entry;
590 unsigned long iv_paddr : 44;
591 unsigned long arr_len : 20;
592 unsigned long dest_paddr;
593 unsigned long dest_final;
594 struct {
595 unsigned long src_paddr : 44;
596 unsigned long src_len : 20;
597 } arr[N2_CHUNK_ARR_LEN];
598};
599
600struct n2_request_context {
601 struct ablkcipher_walk walk;
602 struct list_head chunk_list;
603 struct n2_crypto_chunk chunk;
604 u8 temp_iv[16];
605};
606
607/* The SPU allows some level of flexibility for partial cipher blocks
608 * being specified in a descriptor.
609 *
610 * It merely requires that every descriptor's length field is at least
611 * as large as the cipher block size. This means that a cipher block
612 * can span at most 2 descriptors. However, this does not allow a
613 * partial block to span into the final descriptor as that would
 614 * partial block to span into the final descriptor as that would
 615 * violate the rule (since every descriptor's length must be at least
616 *
617 * 0xe --> 0xa --> 0x8
618 *
619 * is a valid length sequence, whereas:
620 *
621 * 0xe --> 0xb --> 0x7
622 *
623 * is not a valid sequence.
624 */
625
626struct n2_cipher_alg {
627 struct list_head entry;
628 u8 enc_type;
629 struct crypto_alg alg;
630};
631
632static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
633{
634 struct crypto_alg *alg = tfm->__crt_alg;
635
636 return container_of(alg, struct n2_cipher_alg, alg);
637}
638
639struct n2_cipher_request_context {
640 struct ablkcipher_walk walk;
641};
642
643static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
644 unsigned int keylen)
645{
646 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
647 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
648 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
649
650 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
651
652 switch (keylen) {
653 case AES_KEYSIZE_128:
654 ctx->enc_type |= ENC_TYPE_ALG_AES128;
655 break;
656 case AES_KEYSIZE_192:
657 ctx->enc_type |= ENC_TYPE_ALG_AES192;
658 break;
659 case AES_KEYSIZE_256:
660 ctx->enc_type |= ENC_TYPE_ALG_AES256;
661 break;
662 default:
663 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
664 return -EINVAL;
665 }
666
667 ctx->key_len = keylen;
668 memcpy(ctx->key.aes, key, keylen);
669 return 0;
670}
671
672static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
673 unsigned int keylen)
674{
675 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
676 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
677 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
678 u32 tmp[DES_EXPKEY_WORDS];
679 int err;
680
681 ctx->enc_type = n2alg->enc_type;
682
683 if (keylen != DES_KEY_SIZE) {
684 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
685 return -EINVAL;
686 }
687
688 err = des_ekey(tmp, key);
689 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
690 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
691 return -EINVAL;
692 }
693
694 ctx->key_len = keylen;
695 memcpy(ctx->key.des, key, keylen);
696 return 0;
697}
698
699static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
700 unsigned int keylen)
701{
702 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
703 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
704 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
705
706 ctx->enc_type = n2alg->enc_type;
707
708 if (keylen != (3 * DES_KEY_SIZE)) {
709 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
710 return -EINVAL;
711 }
712 ctx->key_len = keylen;
713 memcpy(ctx->key.des3, key, keylen);
714 return 0;
715}
716
717static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
718 unsigned int keylen)
719{
720 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
721 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
722 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
723 u8 *s = ctx->key.arc4;
724 u8 *x = s + 256;
725 u8 *y = x + 1;
726 int i, j, k;
727
728 ctx->enc_type = n2alg->enc_type;
729
730 j = k = 0;
731 *x = 0;
732 *y = 0;
733 for (i = 0; i < 256; i++)
734 s[i] = i;
735 for (i = 0; i < 256; i++) {
736 u8 a = s[i];
737 j = (j + key[k] + a) & 0xff;
738 s[i] = s[j];
739 s[j] = a;
740 if (++k >= keylen)
741 k = 0;
742 }
743
744 return 0;
745}
746
747static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
748{
749 int this_len = nbytes;
750
751 this_len -= (nbytes & (block_size - 1));
752 return this_len > (1 << 16) ? (1 << 16) : this_len;
753}
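/* Illustrative numbers only: with a 16-byte block size, a 0x2345-byte
 * request yields 0x2340 here (the trailing partial block is deferred),
 * and anything that rounds to more than 0x10000 is clamped to 0x10000;
 * the remainder is consumed by later iterations of the chunking loop in
 * n2_compute_chunks().
 */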
754
755static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
756 struct spu_queue *qp, bool encrypt)
757{
758 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
759 struct cwq_initial_entry *ent;
760 bool in_place;
761 int i;
762
763 ent = spu_queue_alloc(qp, cp->arr_len);
764 if (!ent) {
765 pr_info("queue_alloc() of %d fails\n",
766 cp->arr_len);
767 return -EBUSY;
768 }
769
770 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
771
772 ent->control = control_word_base(cp->arr[0].src_len,
773 0, ctx->enc_type, 0, 0,
774 false, true, false, encrypt,
775 OPCODE_ENCRYPT |
776 (in_place ? OPCODE_INPLACE_BIT : 0));
777 ent->src_addr = cp->arr[0].src_paddr;
778 ent->auth_key_addr = 0UL;
779 ent->auth_iv_addr = 0UL;
780 ent->final_auth_state_addr = 0UL;
781 ent->enc_key_addr = __pa(&ctx->key);
782 ent->enc_iv_addr = cp->iv_paddr;
783 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
784
785 for (i = 1; i < cp->arr_len; i++) {
786 ent = spu_queue_next(qp, ent);
787
788 ent->control = cp->arr[i].src_len - 1;
789 ent->src_addr = cp->arr[i].src_paddr;
790 ent->auth_key_addr = 0UL;
791 ent->auth_iv_addr = 0UL;
792 ent->final_auth_state_addr = 0UL;
793 ent->enc_key_addr = 0UL;
794 ent->enc_iv_addr = 0UL;
795 ent->dest_addr = 0UL;
796 }
797 ent->control |= CONTROL_END_OF_BLOCK;
798
799 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
800}
801
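/* Split the ablkcipher walk into n2_crypto_chunk records.  A new chunk
 * is started whenever the in-place/out-of-place property changes, an
 * out-of-place destination stops being contiguous with the previous
 * descriptor, the per-chunk source array (N2_CHUNK_ARR_LEN entries)
 * fills up, or the accumulated length would exceed 2^16 bytes.
 */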
802static int n2_compute_chunks(struct ablkcipher_request *req)
803{
804 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
805 struct ablkcipher_walk *walk = &rctx->walk;
806 struct n2_crypto_chunk *chunk;
807 unsigned long dest_prev;
808 unsigned int tot_len;
809 bool prev_in_place;
810 int err, nbytes;
811
812 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
813 err = ablkcipher_walk_phys(req, walk);
814 if (err)
815 return err;
816
817 INIT_LIST_HEAD(&rctx->chunk_list);
818
819 chunk = &rctx->chunk;
820 INIT_LIST_HEAD(&chunk->entry);
821
822 chunk->iv_paddr = 0UL;
823 chunk->arr_len = 0;
824 chunk->dest_paddr = 0UL;
825
826 prev_in_place = false;
827 dest_prev = ~0UL;
828 tot_len = 0;
829
830 while ((nbytes = walk->nbytes) != 0) {
831 unsigned long dest_paddr, src_paddr;
832 bool in_place;
833 int this_len;
834
835 src_paddr = (page_to_phys(walk->src.page) +
836 walk->src.offset);
837 dest_paddr = (page_to_phys(walk->dst.page) +
838 walk->dst.offset);
839 in_place = (src_paddr == dest_paddr);
840 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
841
842 if (chunk->arr_len != 0) {
843 if (in_place != prev_in_place ||
844 (!prev_in_place &&
845 dest_paddr != dest_prev) ||
846 chunk->arr_len == N2_CHUNK_ARR_LEN ||
847 tot_len + this_len > (1 << 16)) {
848 chunk->dest_final = dest_prev;
849 list_add_tail(&chunk->entry,
850 &rctx->chunk_list);
851 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
852 if (!chunk) {
853 err = -ENOMEM;
854 break;
855 }
856 INIT_LIST_HEAD(&chunk->entry);
857 }
858 }
859 if (chunk->arr_len == 0) {
860 chunk->dest_paddr = dest_paddr;
861 tot_len = 0;
862 }
863 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
864 chunk->arr[chunk->arr_len].src_len = this_len;
865 chunk->arr_len++;
866
867 dest_prev = dest_paddr + this_len;
868 prev_in_place = in_place;
869 tot_len += this_len;
870
871 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
872 if (err)
873 break;
874 }
875 if (!err && chunk->arr_len != 0) {
876 chunk->dest_final = dest_prev;
877 list_add_tail(&chunk->entry, &rctx->chunk_list);
878 }
879
880 return err;
881}
882
883static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
884{
885 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
886 struct n2_crypto_chunk *c, *tmp;
887
888 if (final_iv)
889 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
890
891 ablkcipher_walk_complete(&rctx->walk);
892 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
893 list_del(&c->entry);
894 if (unlikely(c != &rctx->chunk))
895 kfree(c);
896 }
897
898}
899
900static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
901{
902 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
903 struct crypto_tfm *tfm = req->base.tfm;
904 int err = n2_compute_chunks(req);
905 struct n2_crypto_chunk *c, *tmp;
906 unsigned long flags, hv_ret;
907 struct spu_queue *qp;
908
909 if (err)
910 return err;
911
912 qp = cpu_to_cwq[get_cpu()];
913 err = -ENODEV;
914 if (!qp)
915 goto out;
916
917 spin_lock_irqsave(&qp->lock, flags);
918
919 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
920 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
921 if (err)
922 break;
923 list_del(&c->entry);
924 if (unlikely(c != &rctx->chunk))
925 kfree(c);
926 }
927 if (!err) {
928 hv_ret = wait_for_tail(qp);
929 if (hv_ret != HV_EOK)
930 err = -EINVAL;
931 }
932
933 spin_unlock_irqrestore(&qp->lock, flags);
934
 935out:
 936	put_cpu();
 937
938 n2_chunk_complete(req, NULL);
939 return err;
940}
941
942static int n2_encrypt_ecb(struct ablkcipher_request *req)
943{
944 return n2_do_ecb(req, true);
945}
946
947static int n2_decrypt_ecb(struct ablkcipher_request *req)
948{
949 return n2_do_ecb(req, false);
950}
951
952static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
953{
954 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
955 struct crypto_tfm *tfm = req->base.tfm;
956 unsigned long flags, hv_ret, iv_paddr;
957 int err = n2_compute_chunks(req);
958 struct n2_crypto_chunk *c, *tmp;
959 struct spu_queue *qp;
960 void *final_iv_addr;
961
962 final_iv_addr = NULL;
963
964 if (err)
965 return err;
966
967 qp = cpu_to_cwq[get_cpu()];
968 err = -ENODEV;
969 if (!qp)
970 goto out;
971
972 spin_lock_irqsave(&qp->lock, flags);
973
974 if (encrypt) {
975 iv_paddr = __pa(rctx->walk.iv);
976 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
977 entry) {
978 c->iv_paddr = iv_paddr;
979 err = __n2_crypt_chunk(tfm, c, qp, true);
980 if (err)
981 break;
982 iv_paddr = c->dest_final - rctx->walk.blocksize;
983 list_del(&c->entry);
984 if (unlikely(c != &rctx->chunk))
985 kfree(c);
986 }
987 final_iv_addr = __va(iv_paddr);
988 } else {
989 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
990 entry) {
991 if (c == &rctx->chunk) {
992 iv_paddr = __pa(rctx->walk.iv);
993 } else {
994 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
995 tmp->arr[tmp->arr_len-1].src_len -
996 rctx->walk.blocksize);
997 }
998 if (!final_iv_addr) {
999 unsigned long pa;
1000
1001 pa = (c->arr[c->arr_len-1].src_paddr +
1002 c->arr[c->arr_len-1].src_len -
1003 rctx->walk.blocksize);
1004 final_iv_addr = rctx->temp_iv;
1005 memcpy(rctx->temp_iv, __va(pa),
1006 rctx->walk.blocksize);
1007 }
1008 c->iv_paddr = iv_paddr;
1009 err = __n2_crypt_chunk(tfm, c, qp, false);
1010 if (err)
1011 break;
1012 list_del(&c->entry);
1013 if (unlikely(c != &rctx->chunk))
1014 kfree(c);
1015 }
1016 }
1017 if (!err) {
1018 hv_ret = wait_for_tail(qp);
1019 if (hv_ret != HV_EOK)
1020 err = -EINVAL;
1021 }
1022
1023 spin_unlock_irqrestore(&qp->lock, flags);
1024
 1025out:
 1026	put_cpu();
 1027
1028 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1029 return err;
1030}
1031
1032static int n2_encrypt_chaining(struct ablkcipher_request *req)
1033{
1034 return n2_do_chaining(req, true);
1035}
1036
1037static int n2_decrypt_chaining(struct ablkcipher_request *req)
1038{
1039 return n2_do_chaining(req, false);
1040}
1041
1042struct n2_cipher_tmpl {
1043 const char *name;
1044 const char *drv_name;
1045 u8 block_size;
1046 u8 enc_type;
1047 struct ablkcipher_alg ablkcipher;
1048};
1049
1050static const struct n2_cipher_tmpl cipher_tmpls[] = {
1051 /* ARC4: only ECB is supported (chaining bits ignored) */
1052 { .name = "ecb(arc4)",
1053 .drv_name = "ecb-arc4",
1054 .block_size = 1,
1055 .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1056 ENC_TYPE_CHAINING_ECB),
1057 .ablkcipher = {
1058 .min_keysize = 1,
1059 .max_keysize = 256,
1060 .setkey = n2_arc4_setkey,
1061 .encrypt = n2_encrypt_ecb,
1062 .decrypt = n2_decrypt_ecb,
1063 },
1064 },
1065
 1066	/* DES: ECB, CBC and CFB are supported */
1067 { .name = "ecb(des)",
1068 .drv_name = "ecb-des",
1069 .block_size = DES_BLOCK_SIZE,
1070 .enc_type = (ENC_TYPE_ALG_DES |
1071 ENC_TYPE_CHAINING_ECB),
1072 .ablkcipher = {
1073 .min_keysize = DES_KEY_SIZE,
1074 .max_keysize = DES_KEY_SIZE,
1075 .setkey = n2_des_setkey,
1076 .encrypt = n2_encrypt_ecb,
1077 .decrypt = n2_decrypt_ecb,
1078 },
1079 },
1080 { .name = "cbc(des)",
1081 .drv_name = "cbc-des",
1082 .block_size = DES_BLOCK_SIZE,
1083 .enc_type = (ENC_TYPE_ALG_DES |
1084 ENC_TYPE_CHAINING_CBC),
1085 .ablkcipher = {
1086 .ivsize = DES_BLOCK_SIZE,
1087 .min_keysize = DES_KEY_SIZE,
1088 .max_keysize = DES_KEY_SIZE,
1089 .setkey = n2_des_setkey,
1090 .encrypt = n2_encrypt_chaining,
1091 .decrypt = n2_decrypt_chaining,
1092 },
1093 },
1094 { .name = "cfb(des)",
1095 .drv_name = "cfb-des",
1096 .block_size = DES_BLOCK_SIZE,
1097 .enc_type = (ENC_TYPE_ALG_DES |
1098 ENC_TYPE_CHAINING_CFB),
1099 .ablkcipher = {
1100 .min_keysize = DES_KEY_SIZE,
1101 .max_keysize = DES_KEY_SIZE,
1102 .setkey = n2_des_setkey,
1103 .encrypt = n2_encrypt_chaining,
1104 .decrypt = n2_decrypt_chaining,
1105 },
1106 },
1107
 1108	/* 3DES: ECB, CBC and CFB are supported */
1109 { .name = "ecb(des3_ede)",
1110 .drv_name = "ecb-3des",
1111 .block_size = DES_BLOCK_SIZE,
1112 .enc_type = (ENC_TYPE_ALG_3DES |
1113 ENC_TYPE_CHAINING_ECB),
1114 .ablkcipher = {
1115 .min_keysize = 3 * DES_KEY_SIZE,
1116 .max_keysize = 3 * DES_KEY_SIZE,
1117 .setkey = n2_3des_setkey,
1118 .encrypt = n2_encrypt_ecb,
1119 .decrypt = n2_decrypt_ecb,
1120 },
1121 },
1122 { .name = "cbc(des3_ede)",
1123 .drv_name = "cbc-3des",
1124 .block_size = DES_BLOCK_SIZE,
1125 .enc_type = (ENC_TYPE_ALG_3DES |
1126 ENC_TYPE_CHAINING_CBC),
1127 .ablkcipher = {
1128 .ivsize = DES_BLOCK_SIZE,
1129 .min_keysize = 3 * DES_KEY_SIZE,
1130 .max_keysize = 3 * DES_KEY_SIZE,
1131 .setkey = n2_3des_setkey,
1132 .encrypt = n2_encrypt_chaining,
1133 .decrypt = n2_decrypt_chaining,
1134 },
1135 },
1136 { .name = "cfb(des3_ede)",
1137 .drv_name = "cfb-3des",
1138 .block_size = DES_BLOCK_SIZE,
1139 .enc_type = (ENC_TYPE_ALG_3DES |
1140 ENC_TYPE_CHAINING_CFB),
1141 .ablkcipher = {
1142 .min_keysize = 3 * DES_KEY_SIZE,
1143 .max_keysize = 3 * DES_KEY_SIZE,
1144 .setkey = n2_3des_setkey,
1145 .encrypt = n2_encrypt_chaining,
1146 .decrypt = n2_decrypt_chaining,
1147 },
1148 },
 1149	/* AES: ECB, CBC and CTR are supported */
1150 { .name = "ecb(aes)",
1151 .drv_name = "ecb-aes",
1152 .block_size = AES_BLOCK_SIZE,
1153 .enc_type = (ENC_TYPE_ALG_AES128 |
1154 ENC_TYPE_CHAINING_ECB),
1155 .ablkcipher = {
1156 .min_keysize = AES_MIN_KEY_SIZE,
1157 .max_keysize = AES_MAX_KEY_SIZE,
1158 .setkey = n2_aes_setkey,
1159 .encrypt = n2_encrypt_ecb,
1160 .decrypt = n2_decrypt_ecb,
1161 },
1162 },
1163 { .name = "cbc(aes)",
1164 .drv_name = "cbc-aes",
1165 .block_size = AES_BLOCK_SIZE,
1166 .enc_type = (ENC_TYPE_ALG_AES128 |
1167 ENC_TYPE_CHAINING_CBC),
1168 .ablkcipher = {
1169 .ivsize = AES_BLOCK_SIZE,
1170 .min_keysize = AES_MIN_KEY_SIZE,
1171 .max_keysize = AES_MAX_KEY_SIZE,
1172 .setkey = n2_aes_setkey,
1173 .encrypt = n2_encrypt_chaining,
1174 .decrypt = n2_decrypt_chaining,
1175 },
1176 },
1177 { .name = "ctr(aes)",
1178 .drv_name = "ctr-aes",
1179 .block_size = AES_BLOCK_SIZE,
1180 .enc_type = (ENC_TYPE_ALG_AES128 |
1181 ENC_TYPE_CHAINING_COUNTER),
1182 .ablkcipher = {
1183 .ivsize = AES_BLOCK_SIZE,
1184 .min_keysize = AES_MIN_KEY_SIZE,
1185 .max_keysize = AES_MAX_KEY_SIZE,
1186 .setkey = n2_aes_setkey,
1187 .encrypt = n2_encrypt_chaining,
1188 .decrypt = n2_encrypt_chaining,
1189 },
1190 },
1191
1192};
1193#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1194
1195static LIST_HEAD(cipher_algs);
1196
1197struct n2_hash_tmpl {
1198 const char *name;
1199 int (*digest)(struct ahash_request *req);
1200 u8 digest_size;
1201 u8 block_size;
1202};
1203static const struct n2_hash_tmpl hash_tmpls[] = {
1204 { .name = "md5",
1205 .digest = n2_md5_async_digest,
1206 .digest_size = MD5_DIGEST_SIZE,
1207 .block_size = MD5_HMAC_BLOCK_SIZE },
1208 { .name = "sha1",
1209 .digest = n2_sha1_async_digest,
1210 .digest_size = SHA1_DIGEST_SIZE,
1211 .block_size = SHA1_BLOCK_SIZE },
1212 { .name = "sha256",
1213 .digest = n2_sha256_async_digest,
1214 .digest_size = SHA256_DIGEST_SIZE,
1215 .block_size = SHA256_BLOCK_SIZE },
1216 { .name = "sha224",
1217 .digest = n2_sha224_async_digest,
1218 .digest_size = SHA224_DIGEST_SIZE,
1219 .block_size = SHA224_BLOCK_SIZE },
1220};
1221#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1222
1223struct n2_ahash_alg {
1224 struct list_head entry;
1225 struct ahash_alg alg;
1226};
1227static LIST_HEAD(ahash_algs);
1228
1229static int algs_registered;
1230
1231static void __n2_unregister_algs(void)
1232{
1233 struct n2_cipher_alg *cipher, *cipher_tmp;
1234 struct n2_ahash_alg *alg, *alg_tmp;
1235
1236 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1237 crypto_unregister_alg(&cipher->alg);
1238 list_del(&cipher->entry);
1239 kfree(cipher);
1240 }
1241 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1242 crypto_unregister_ahash(&alg->alg);
1243 list_del(&alg->entry);
1244 kfree(alg);
1245 }
1246}
1247
1248static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1249{
1250 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1251 return 0;
1252}
1253
1254static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1255{
1256 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1257 struct crypto_alg *alg;
1258 int err;
1259
1260 if (!p)
1261 return -ENOMEM;
1262
1263 alg = &p->alg;
1264
1265 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1266 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1267 alg->cra_priority = N2_CRA_PRIORITY;
1268 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1269 alg->cra_blocksize = tmpl->block_size;
1270 p->enc_type = tmpl->enc_type;
1271 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1272 alg->cra_type = &crypto_ablkcipher_type;
1273 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1274 alg->cra_init = n2_cipher_cra_init;
1275 alg->cra_module = THIS_MODULE;
1276
1277 list_add(&p->entry, &cipher_algs);
1278 err = crypto_register_alg(alg);
1279 if (err) {
1280 list_del(&p->entry);
1281 kfree(p);
1282 }
1283 return err;
1284}
1285
1286static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1287{
1288 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1289 struct hash_alg_common *halg;
1290 struct crypto_alg *base;
1291 struct ahash_alg *ahash;
1292 int err;
1293
1294 if (!p)
1295 return -ENOMEM;
1296
1297 ahash = &p->alg;
1298 ahash->init = n2_hash_async_init;
1299 ahash->update = n2_hash_async_update;
1300 ahash->final = n2_hash_async_final;
1301 ahash->finup = n2_hash_async_finup;
1302 ahash->digest = tmpl->digest;
1303
1304 halg = &ahash->halg;
1305 halg->digestsize = tmpl->digest_size;
1306
1307 base = &halg->base;
1308 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1309 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1310 base->cra_priority = N2_CRA_PRIORITY;
1311 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
1312 base->cra_blocksize = tmpl->block_size;
1313 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1314 base->cra_module = THIS_MODULE;
1315 base->cra_init = n2_hash_cra_init;
1316 base->cra_exit = n2_hash_cra_exit;
1317
1318 list_add(&p->entry, &ahash_algs);
1319 err = crypto_register_ahash(ahash);
1320 if (err) {
1321 list_del(&p->entry);
1322 kfree(p);
1323 }
1324 return err;
1325}
1326
1327static int __devinit n2_register_algs(void)
1328{
1329 int i, err = 0;
1330
1331 mutex_lock(&spu_lock);
1332 if (algs_registered++)
1333 goto out;
1334
1335 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1336 err = __n2_register_one_ahash(&hash_tmpls[i]);
1337 if (err) {
1338 __n2_unregister_algs();
1339 goto out;
1340 }
1341 }
1342 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1343 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1344 if (err) {
1345 __n2_unregister_algs();
1346 goto out;
1347 }
1348 }
1349
1350out:
1351 mutex_unlock(&spu_lock);
1352 return err;
1353}
1354
1355static void __exit n2_unregister_algs(void)
1356{
1357 mutex_lock(&spu_lock);
1358 if (!--algs_registered)
1359 __n2_unregister_algs();
1360 mutex_unlock(&spu_lock);
1361}
1362
1363/* To map CWQ queues to interrupt sources, the hypervisor API provides
1364 * a devino. This isn't very useful to us because all of the
1365 * interrupts listed in the of_device node have been translated to
1366 * Linux virtual IRQ cookie numbers.
1367 *
1368 * So we have to back-translate, going through the 'intr' and 'ino'
 1369 * property tables of the n2cp MDESC node, matching them against the OF
 1370 * 'interrupts' property entries, in order to figure out which
1371 * devino goes to which already-translated IRQ.
1372 */
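/* Purely illustrative example: if the MDESC 'ino' table maps ino 0x1d to
 * 'intr' 2 and the node's OF "interrupts" property is <1 2 3>, then
 * devino 0x1d resolves to index 1 and the driver ends up using
 * dev->irqs[1].
 */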
1373static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
1374 unsigned long dev_ino)
1375{
1376 const unsigned int *dev_intrs;
1377 unsigned int intr;
1378 int i;
1379
1380 for (i = 0; i < ip->num_intrs; i++) {
1381 if (ip->ino_table[i].ino == dev_ino)
1382 break;
1383 }
1384 if (i == ip->num_intrs)
1385 return -ENODEV;
1386
1387 intr = ip->ino_table[i].intr;
1388
1389	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1390	if (!dev_intrs)
1391 return -ENODEV;
1392
1393 for (i = 0; i < dev->num_irqs; i++) {
1394 if (dev_intrs[i] == intr)
1395 return i;
1396 }
1397
1398 return -ENODEV;
1399}
1400
1401static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
1402 const char *irq_name, struct spu_queue *p,
1403 irq_handler_t handler)
1404{
1405 unsigned long herr;
1406 int index;
1407
1408 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1409 if (herr)
1410 return -EINVAL;
1411
1412 index = find_devino_index(dev, ip, p->devino);
1413 if (index < 0)
1414 return index;
1415
1416 p->irq = dev->irqs[index];
1417
1418 sprintf(p->irq_name, "%s-%d", irq_name, index);
1419
1420 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
1421 p->irq_name, p);
1422}
1423
1424static struct kmem_cache *queue_cache[2];
1425
1426static void *new_queue(unsigned long q_type)
1427{
1428 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1429}
1430
1431static void free_queue(void *p, unsigned long q_type)
1432{
1433 return kmem_cache_free(queue_cache[q_type - 1], p);
1434}
1435
1436static int queue_cache_init(void)
1437{
1438 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1439 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1440			kmem_cache_create("mau_queue",
1441					  (MAU_NUM_ENTRIES *
1442 MAU_ENTRY_SIZE),
1443 MAU_ENTRY_SIZE, 0, NULL);
1444 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1445 return -ENOMEM;
1446
1447 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1448 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1449 kmem_cache_create("cwq_queue",
1450 (CWQ_NUM_ENTRIES *
1451 CWQ_ENTRY_SIZE),
1452 CWQ_ENTRY_SIZE, 0, NULL);
1453 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1454 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1455 return -ENOMEM;
1456 }
1457 return 0;
1458}
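/* Each object in these caches is one complete queue (NUM_ENTRIES *
 * ENTRY_SIZE bytes), aligned to the entry size, so new_queue() and
 * free_queue() below hand out and return whole rings.
 */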
1459
1460static void queue_cache_destroy(void)
1461{
1462 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1463 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1464}
1465
1466static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1467{
1468 cpumask_var_t old_allowed;
1469 unsigned long hv_ret;
1470
1471 if (cpumask_empty(&p->sharing))
1472 return -EINVAL;
1473
1474 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1475 return -ENOMEM;
1476
1477 cpumask_copy(old_allowed, &current->cpus_allowed);
1478
1479 set_cpus_allowed_ptr(current, &p->sharing);
1480
1481 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1482 CWQ_NUM_ENTRIES, &p->qhandle);
1483 if (!hv_ret)
1484 sun4v_ncs_sethead_marker(p->qhandle, 0);
1485
1486 set_cpus_allowed_ptr(current, old_allowed);
1487
1488 free_cpumask_var(old_allowed);
1489
1490 return (hv_ret ? -EINVAL : 0);
1491}
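/* Note: the cpumask juggling above temporarily binds the caller to the
 * CPUs that share this SPU, presumably so that the sun4v_ncs_qconf()
 * hypercall runs on a CPU attached to the unit being configured; the
 * original affinity is restored before returning.
 */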
1492
1493static int spu_queue_setup(struct spu_queue *p)
1494{
1495 int err;
1496
1497 p->q = new_queue(p->q_type);
1498 if (!p->q)
1499 return -ENOMEM;
1500
1501 err = spu_queue_register(p, p->q_type);
1502 if (err) {
1503 free_queue(p->q, p->q_type);
1504 p->q = NULL;
1505 }
1506
1507 return err;
1508}
1509
1510static void spu_queue_destroy(struct spu_queue *p)
1511{
1512 unsigned long hv_ret;
1513
1514 if (!p->q)
1515 return;
1516
1517 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1518
1519 if (!hv_ret)
1520 free_queue(p->q, p->q_type);
1521}
1522
1523static void spu_list_destroy(struct list_head *list)
1524{
1525 struct spu_queue *p, *n;
1526
1527 list_for_each_entry_safe(p, n, list, list) {
1528 int i;
1529
1530 for (i = 0; i < NR_CPUS; i++) {
1531 if (cpu_to_cwq[i] == p)
1532 cpu_to_cwq[i] = NULL;
1533 }
1534
1535 if (p->irq) {
1536 free_irq(p->irq, p);
1537 p->irq = 0;
1538 }
1539 spu_queue_destroy(p);
1540 list_del(&p->list);
1541 kfree(p);
1542 }
1543}
1544
1545/* Walk the backward arcs of a CWQ 'exec-unit' node,
1546 * gathering cpu membership information.
1547 */
1548static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1549 struct of_device *dev,
1550 u64 node, struct spu_queue *p,
1551 struct spu_queue **table)
1552{
1553 u64 arc;
1554
1555 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1556 u64 tgt = mdesc_arc_target(mdesc, arc);
1557 const char *name = mdesc_node_name(mdesc, tgt);
1558 const u64 *id;
1559
1560 if (strcmp(name, "cpu"))
1561 continue;
1562 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1563 if (table[*id] != NULL) {
1564 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
 1565				dev->dev.of_node->full_name);
 1566			return -EINVAL;
1567 }
1568 cpu_set(*id, p->sharing);
1569 table[*id] = p;
1570 }
1571 return 0;
1572}
1573
1574/* Process an 'exec-unit' MDESC node of type 'cwq'. */
1575static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1576 struct of_device *dev, struct mdesc_handle *mdesc,
1577 u64 node, const char *iname, unsigned long q_type,
1578 irq_handler_t handler, struct spu_queue **table)
1579{
1580 struct spu_queue *p;
1581 int err;
1582
1583 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1584 if (!p) {
1585 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
 1586			dev->dev.of_node->full_name);
 1587		return -ENOMEM;
1588 }
1589
1590 cpus_clear(p->sharing);
1591 spin_lock_init(&p->lock);
1592 p->q_type = q_type;
1593 INIT_LIST_HEAD(&p->jobs);
1594 list_add(&p->list, list);
1595
1596 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1597 if (err)
1598 return err;
1599
1600 err = spu_queue_setup(p);
1601 if (err)
1602 return err;
1603
1604 return spu_map_ino(dev, ip, iname, p, handler);
1605}
1606
1607static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
1608 struct spu_mdesc_info *ip, struct list_head *list,
1609 const char *exec_name, unsigned long q_type,
1610 irq_handler_t handler, struct spu_queue **table)
1611{
1612 int err = 0;
1613 u64 node;
1614
1615 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1616 const char *type;
1617
1618 type = mdesc_get_property(mdesc, node, "type", NULL);
1619 if (!type || strcmp(type, exec_name))
1620 continue;
1621
1622 err = handle_exec_unit(ip, list, dev, mdesc, node,
1623 exec_name, q_type, handler, table);
1624 if (err) {
1625 spu_list_destroy(list);
1626 break;
1627 }
1628 }
1629
1630 return err;
1631}
1632
1633static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1634 struct spu_mdesc_info *ip)
1635{
1636 const u64 *intr, *ino;
1637 int intr_len, ino_len;
1638 int i;
1639
1640 intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
1641 if (!intr)
1642 return -ENODEV;
1643
1644 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
 1645	if (!ino)
1646 return -ENODEV;
1647
1648 if (intr_len != ino_len)
1649 return -EINVAL;
1650
1651 ip->num_intrs = intr_len / sizeof(u64);
1652 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1653 ip->num_intrs),
1654 GFP_KERNEL);
1655 if (!ip->ino_table)
1656 return -ENOMEM;
1657
1658 for (i = 0; i < ip->num_intrs; i++) {
1659 struct ino_blob *b = &ip->ino_table[i];
1660 b->intr = intr[i];
1661 b->ino = ino[i];
1662 }
1663
1664 return 0;
1665}
1666
1667static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1668 struct of_device *dev,
1669 struct spu_mdesc_info *ip,
1670 const char *node_name)
1671{
1672 const unsigned int *reg;
1673 u64 node;
1674
 1675	reg = of_get_property(dev->dev.of_node, "reg", NULL);
 1676	if (!reg)
1677 return -ENODEV;
1678
1679 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1680 const char *name;
1681 const u64 *chdl;
1682
1683 name = mdesc_get_property(mdesc, node, "name", NULL);
1684 if (!name || strcmp(name, node_name))
1685 continue;
1686 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1687 if (!chdl || (*chdl != *reg))
1688 continue;
1689 ip->cfg_handle = *chdl;
1690 return get_irq_props(mdesc, node, ip);
1691 }
1692
1693 return -ENODEV;
1694}
1695
1696static unsigned long n2_spu_hvapi_major;
1697static unsigned long n2_spu_hvapi_minor;
1698
1699static int __devinit n2_spu_hvapi_register(void)
1700{
1701 int err;
1702
1703 n2_spu_hvapi_major = 2;
1704 n2_spu_hvapi_minor = 0;
1705
1706 err = sun4v_hvapi_register(HV_GRP_NCS,
1707 n2_spu_hvapi_major,
1708 &n2_spu_hvapi_minor);
1709
1710 if (!err)
1711 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1712 n2_spu_hvapi_major,
1713 n2_spu_hvapi_minor);
1714
1715 return err;
1716}
1717
1718static void n2_spu_hvapi_unregister(void)
1719{
1720 sun4v_hvapi_unregister(HV_GRP_NCS);
1721}
1722
1723static int global_ref;
1724
1725static int __devinit grab_global_resources(void)
1726{
1727 int err = 0;
1728
1729 mutex_lock(&spu_lock);
1730
1731 if (global_ref++)
1732 goto out;
1733
1734 err = n2_spu_hvapi_register();
1735 if (err)
1736 goto out;
1737
1738 err = queue_cache_init();
1739 if (err)
1740 goto out_hvapi_release;
1741
1742 err = -ENOMEM;
1743 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1744 GFP_KERNEL);
1745 if (!cpu_to_cwq)
1746 goto out_queue_cache_destroy;
1747
1748 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1749 GFP_KERNEL);
1750 if (!cpu_to_mau)
1751 goto out_free_cwq_table;
1752
1753 err = 0;
1754
1755out:
1756 if (err)
1757 global_ref--;
1758 mutex_unlock(&spu_lock);
1759 return err;
1760
1761out_free_cwq_table:
1762 kfree(cpu_to_cwq);
1763 cpu_to_cwq = NULL;
1764
1765out_queue_cache_destroy:
1766 queue_cache_destroy();
1767
1768out_hvapi_release:
1769 n2_spu_hvapi_unregister();
1770 goto out;
1771}
1772
1773static void release_global_resources(void)
1774{
1775 mutex_lock(&spu_lock);
1776 if (!--global_ref) {
1777 kfree(cpu_to_cwq);
1778 cpu_to_cwq = NULL;
1779
1780 kfree(cpu_to_mau);
1781 cpu_to_mau = NULL;
1782
1783 queue_cache_destroy();
1784 n2_spu_hvapi_unregister();
1785 }
1786 mutex_unlock(&spu_lock);
1787}
1788
1789static struct n2_crypto * __devinit alloc_n2cp(void)
1790{
1791 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1792
1793 if (np)
1794 INIT_LIST_HEAD(&np->cwq_list);
1795
1796 return np;
1797}
1798
1799static void free_n2cp(struct n2_crypto *np)
1800{
1801 if (np->cwq_info.ino_table) {
1802 kfree(np->cwq_info.ino_table);
1803 np->cwq_info.ino_table = NULL;
1804 }
1805
1806 kfree(np);
1807}
1808
1809static void __devinit n2_spu_driver_version(void)
1810{
1811 static int n2_spu_version_printed;
1812
1813 if (n2_spu_version_printed++ == 0)
1814 pr_info("%s", version);
1815}
1816
1817static int __devinit n2_crypto_probe(struct of_device *dev,
1818 const struct of_device_id *match)
1819{
1820 struct mdesc_handle *mdesc;
1821 const char *full_name;
1822 struct n2_crypto *np;
1823 int err;
1824
1825 n2_spu_driver_version();
1826
 1827	full_name = dev->dev.of_node->full_name;
 1828	pr_info("Found N2CP at %s\n", full_name);
1829
1830 np = alloc_n2cp();
1831 if (!np) {
1832 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
1833 full_name);
1834 return -ENOMEM;
1835 }
1836
1837 err = grab_global_resources();
1838 if (err) {
1839 dev_err(&dev->dev, "%s: Unable to grab "
1840 "global resources.\n", full_name);
1841 goto out_free_n2cp;
1842 }
1843
1844 mdesc = mdesc_grab();
1845
1846 if (!mdesc) {
1847 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1848 full_name);
1849 err = -ENODEV;
1850 goto out_free_global;
1851 }
1852 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1853 if (err) {
1854 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1855 full_name);
1856 mdesc_release(mdesc);
1857 goto out_free_global;
1858 }
1859
1860 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1861 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1862 cpu_to_cwq);
1863 mdesc_release(mdesc);
1864
1865 if (err) {
1866 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
1867 full_name);
1868 goto out_free_global;
1869 }
1870
1871 err = n2_register_algs();
1872 if (err) {
1873 dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
1874 full_name);
1875 goto out_free_spu_list;
1876 }
1877
1878 dev_set_drvdata(&dev->dev, np);
1879
1880 return 0;
1881
1882out_free_spu_list:
1883 spu_list_destroy(&np->cwq_list);
1884
1885out_free_global:
1886 release_global_resources();
1887
1888out_free_n2cp:
1889 free_n2cp(np);
1890
1891 return err;
1892}
1893
1894static int __devexit n2_crypto_remove(struct of_device *dev)
1895{
1896 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
1897
1898 n2_unregister_algs();
1899
1900 spu_list_destroy(&np->cwq_list);
1901
1902 release_global_resources();
1903
1904 free_n2cp(np);
1905
1906 return 0;
1907}
1908
1909static struct n2_mau * __devinit alloc_ncp(void)
1910{
1911 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
1912
1913 if (mp)
1914 INIT_LIST_HEAD(&mp->mau_list);
1915
1916 return mp;
1917}
1918
1919static void free_ncp(struct n2_mau *mp)
1920{
1921 if (mp->mau_info.ino_table) {
1922 kfree(mp->mau_info.ino_table);
1923 mp->mau_info.ino_table = NULL;
1924 }
1925
1926 kfree(mp);
1927}
1928
1929static int __devinit n2_mau_probe(struct of_device *dev,
1930 const struct of_device_id *match)
1931{
1932 struct mdesc_handle *mdesc;
1933 const char *full_name;
1934 struct n2_mau *mp;
1935 int err;
1936
1937 n2_spu_driver_version();
1938
 1939	full_name = dev->dev.of_node->full_name;
 1940	pr_info("Found NCP at %s\n", full_name);
1941
1942 mp = alloc_ncp();
1943 if (!mp) {
1944 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
1945 full_name);
1946 return -ENOMEM;
1947 }
1948
1949 err = grab_global_resources();
1950 if (err) {
1951 dev_err(&dev->dev, "%s: Unable to grab "
1952 "global resources.\n", full_name);
1953 goto out_free_ncp;
1954 }
1955
1956 mdesc = mdesc_grab();
1957
1958 if (!mdesc) {
1959 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1960 full_name);
1961 err = -ENODEV;
1962 goto out_free_global;
1963 }
1964
1965 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
1966 if (err) {
1967 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1968 full_name);
1969 mdesc_release(mdesc);
1970 goto out_free_global;
1971 }
1972
1973 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
1974 "mau", HV_NCS_QTYPE_MAU, mau_intr,
1975 cpu_to_mau);
1976 mdesc_release(mdesc);
1977
1978 if (err) {
1979 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
1980 full_name);
1981 goto out_free_global;
1982 }
1983
1984 dev_set_drvdata(&dev->dev, mp);
1985
1986 return 0;
1987
1988out_free_global:
1989 release_global_resources();
1990
1991out_free_ncp:
1992 free_ncp(mp);
1993
1994 return err;
1995}
1996
1997static int __devexit n2_mau_remove(struct of_device *dev)
1998{
1999 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2000
2001 spu_list_destroy(&mp->mau_list);
2002
2003 release_global_resources();
2004
2005 free_ncp(mp);
2006
2007 return 0;
2008}
2009
2010static struct of_device_id n2_crypto_match[] = {
2011 {
2012 .name = "n2cp",
2013 .compatible = "SUNW,n2-cwq",
2014 },
2015 {
2016 .name = "n2cp",
2017 .compatible = "SUNW,vf-cwq",
2018 },
2019 {},
2020};
2021
2022MODULE_DEVICE_TABLE(of, n2_crypto_match);
2023
2024static struct of_platform_driver n2_crypto_driver = {
 2025	.driver = {
2026 .name = "n2cp",
2027 .owner = THIS_MODULE,
2028 .of_match_table = n2_crypto_match,
2029 },
 2030	.probe		= n2_crypto_probe,
2031 .remove = __devexit_p(n2_crypto_remove),
2032};
2033
2034static struct of_device_id n2_mau_match[] = {
2035 {
2036 .name = "ncp",
2037 .compatible = "SUNW,n2-mau",
2038 },
2039 {
2040 .name = "ncp",
2041 .compatible = "SUNW,vf-mau",
2042 },
2043 {},
2044};
2045
2046MODULE_DEVICE_TABLE(of, n2_mau_match);
2047
2048static struct of_platform_driver n2_mau_driver = {
 2049	.driver = {
2050 .name = "ncp",
2051 .owner = THIS_MODULE,
2052 .of_match_table = n2_mau_match,
2053 },
 2054	.probe		= n2_mau_probe,
2055 .remove = __devexit_p(n2_mau_remove),
2056};
2057
2058static int __init n2_init(void)
2059{
2060 int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
2061
2062 if (!err) {
2063 err = of_register_driver(&n2_mau_driver, &of_bus_type);
2064 if (err)
2065 of_unregister_driver(&n2_crypto_driver);
2066 }
2067 return err;
2068}
2069
2070static void __exit n2_exit(void)
2071{
2072 of_unregister_driver(&n2_mau_driver);
2073 of_unregister_driver(&n2_crypto_driver);
2074}
2075
2076module_init(n2_init);
2077module_exit(n2_exit);