1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2 *
3 * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/cpumask.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15#include <linux/crypto.h>
16#include <crypto/md5.h>
17#include <crypto/sha.h>
18#include <crypto/aes.h>
19#include <crypto/des.h>
20#include <linux/mutex.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23
24#include <crypto/internal/hash.h>
25#include <crypto/scatterwalk.h>
26#include <crypto/algapi.h>
27
28#include <asm/hypervisor.h>
29#include <asm/mdesc.h>
30
31#include "n2_core.h"
32
33#define DRV_MODULE_NAME "n2_crypto"
34#define DRV_MODULE_VERSION "0.1"
35#define DRV_MODULE_RELDATE "April 29, 2010"
36
37static char version[] __devinitdata =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41MODULE_DESCRIPTION("Niagara2 Crypto driver");
42MODULE_LICENSE("GPL");
43MODULE_VERSION(DRV_MODULE_VERSION);
44
45#define N2_CRA_PRIORITY 300
46
47static DEFINE_MUTEX(spu_lock);
48
49struct spu_queue {
50 cpumask_t sharing;
51 unsigned long qhandle;
52
53 spinlock_t lock;
54 u8 q_type;
55 void *q;
56 unsigned long head;
57 unsigned long tail;
58 struct list_head jobs;
59
60 unsigned long devino;
61
62 char irq_name[32];
63 unsigned int irq;
64
65 struct list_head list;
66};
67
68static struct spu_queue **cpu_to_cwq;
69static struct spu_queue **cpu_to_mau;
70
71static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72{
73 if (q->q_type == HV_NCS_QTYPE_MAU) {
74 off += MAU_ENTRY_SIZE;
75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76 off = 0;
77 } else {
78 off += CWQ_ENTRY_SIZE;
79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80 off = 0;
81 }
82 return off;
83}
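/* Both queue types are simple rings of fixed-size descriptors, so advancing
 * an offset is just "add one entry size and wrap to zero at the end of the
 * ring"; the MAU and CWQ cases differ only in entry size and entry count.
 */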
84
85struct n2_request_common {
86 struct list_head entry;
87 unsigned int offset;
88};
89#define OFFSET_NOT_RUNNING (~(unsigned int)0)
90
91/* An async job request records the final tail value it used in
92 * n2_request_common->offset; test to see if that offset lies in
93 * the range (old_head, new_head], taking wrap-around of the queue into account.
94 */
95static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96 unsigned long old_head, unsigned long new_head)
97{
98 if (old_head <= new_head) {
99 if (offset > old_head && offset <= new_head)
100 return true;
101 } else {
102 if (offset > old_head || offset <= new_head)
103 return true;
104 }
105 return false;
106}
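/* Worked example (hypothetical offsets, not taken from the driver): if the
 * hardware advanced from old_head == 0x380 to new_head == 0x040, the ring
 * wrapped past its end, so any job whose recorded offset is either greater
 * than 0x380 or at most 0x040 is reported as finished by the test above.
 */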
107
108/* When the HEAD marker is unequal to the actual HEAD, we get
109 * a virtual device INO interrupt. We should process the
110 * completed CWQ entries and adjust the HEAD marker to clear
111 * the IRQ.
112 */
113static irqreturn_t cwq_intr(int irq, void *dev_id)
114{
115 unsigned long off, new_head, hv_ret;
116 struct spu_queue *q = dev_id;
117
118 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119 smp_processor_id(), q->qhandle);
120
121 spin_lock(&q->lock);
122
123 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
124
125 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126 smp_processor_id(), new_head, hv_ret);
127
128 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129 /* XXX ... XXX */
130 }
131
132 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133 if (hv_ret == HV_EOK)
134 q->head = new_head;
135
136 spin_unlock(&q->lock);
137
138 return IRQ_HANDLED;
139}
140
141static irqreturn_t mau_intr(int irq, void *dev_id)
142{
143 struct spu_queue *q = dev_id;
144 unsigned long head, hv_ret;
145
146 spin_lock(&q->lock);
147
148 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149 smp_processor_id(), q->qhandle);
150
151 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
152
153 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154 smp_processor_id(), head, hv_ret);
155
156 sun4v_ncs_sethead_marker(q->qhandle, head);
157
158 spin_unlock(&q->lock);
159
160 return IRQ_HANDLED;
161}
162
163static void *spu_queue_next(struct spu_queue *q, void *cur)
164{
165 return q->q + spu_next_offset(q, cur - q->q);
166}
167
168static int spu_queue_num_free(struct spu_queue *q)
169{
170 unsigned long head = q->head;
171 unsigned long tail = q->tail;
172 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173 unsigned long diff;
174
175 if (head > tail)
176 diff = head - tail;
177 else
178 diff = (end - tail) + head;
179
180 return (diff / CWQ_ENTRY_SIZE) - 1;
181}
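/* Note: the "- 1" above keeps one descriptor slot permanently unused, so a
 * fully drained queue (head == tail) reports one fewer free entry than its
 * capacity; this is the usual ring-buffer full/empty disambiguation.
 */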
182
183static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184{
185 int avail = spu_queue_num_free(q);
186
187 if (avail >= num_entries)
188 return q->q + q->tail;
189
190 return NULL;
191}
192
193static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
194{
195 unsigned long hv_ret, new_tail;
196
197 new_tail = spu_next_offset(q, last - q->q);
198
199 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200 if (hv_ret == HV_EOK)
201 q->tail = new_tail;
202 return hv_ret;
203}
204
205static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206 int enc_type, int auth_type,
207 unsigned int hash_len,
208 bool sfas, bool sob, bool eob, bool encrypt,
209 int opcode)
210{
211 u64 word = (len - 1) & CONTROL_LEN;
212
213 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216 if (sfas)
217 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218 if (sob)
219 word |= CONTROL_START_OF_BLOCK;
220 if (eob)
221 word |= CONTROL_END_OF_BLOCK;
222 if (encrypt)
223 word |= CONTROL_ENCRYPT;
224 if (hmac_key_len)
225 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226 if (hash_len)
227 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228
229 return word;
230}
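/* Example usage (taken from the callers below): the first descriptor of a
 * hash job is built with
 *
 *	control_word_base(nbytes, 0, 0, auth_type, digest_size,
 *			  false, true, false, false,
 *			  OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. start-of-block set and no encryption, while CONTROL_END_OF_BLOCK is
 * OR-ed into the last descriptor's control word once the final chunk of the
 * scatterlist walk is known.
 */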
231
232#if 0
233static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
234{
235 if (this_len >= 64 ||
236 qp->head != qp->tail)
237 return true;
238 return false;
239}
240#endif
241
242struct n2_base_ctx {
243 struct list_head list;
244};
245
246static void n2_base_ctx_init(struct n2_base_ctx *ctx)
247{
248 INIT_LIST_HEAD(&ctx->list);
249}
250
251struct n2_hash_ctx {
252 struct n2_base_ctx base;
253
254 struct crypto_ahash *fallback_tfm;
255};
256
257struct n2_hash_req_ctx {
258 union {
259 struct md5_state md5;
260 struct sha1_state sha1;
261 struct sha256_state sha256;
262 } u;
263
264 unsigned char hash_key[64];
265 unsigned char keyed_zero_hash[32];
266
267 struct ahash_request fallback_req;
268};
269
270static int n2_hash_async_init(struct ahash_request *req)
271{
272 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
273 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
274 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
275
276 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
277 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
278
279 return crypto_ahash_init(&rctx->fallback_req);
280}
281
282static int n2_hash_async_update(struct ahash_request *req)
283{
284 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
285 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
286 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
287
288 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
289 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
290 rctx->fallback_req.nbytes = req->nbytes;
291 rctx->fallback_req.src = req->src;
292
293 return crypto_ahash_update(&rctx->fallback_req);
294}
295
296static int n2_hash_async_final(struct ahash_request *req)
297{
298 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
299 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
300 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
301
302 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
303 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
304 rctx->fallback_req.result = req->result;
305
306 return crypto_ahash_final(&rctx->fallback_req);
307}
308
309static int n2_hash_async_finup(struct ahash_request *req)
310{
311 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
312 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
313 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
314
315 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
316 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
317 rctx->fallback_req.nbytes = req->nbytes;
318 rctx->fallback_req.src = req->src;
319 rctx->fallback_req.result = req->result;
320
321 return crypto_ahash_finup(&rctx->fallback_req);
322}
323
324static int n2_hash_cra_init(struct crypto_tfm *tfm)
325{
326 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
327 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
328 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
329 struct crypto_ahash *fallback_tfm;
330 int err;
331
332 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
333 CRYPTO_ALG_NEED_FALLBACK);
334 if (IS_ERR(fallback_tfm)) {
335 pr_warning("Fallback driver '%s' could not be loaded!\n",
336 fallback_driver_name);
337 err = PTR_ERR(fallback_tfm);
338 goto out;
339 }
340
341 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
342 crypto_ahash_reqsize(fallback_tfm)));
343
344 ctx->fallback_tfm = fallback_tfm;
345 return 0;
346
347out:
348 return err;
349}
350
351static void n2_hash_cra_exit(struct crypto_tfm *tfm)
352{
353 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
354 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
355
356 crypto_free_ahash(ctx->fallback_tfm);
357}
358
359static unsigned long wait_for_tail(struct spu_queue *qp)
360{
361 unsigned long head, hv_ret;
362
363 do {
364 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
365 if (hv_ret != HV_EOK) {
366 pr_err("Hypervisor error on gethead\n");
367 break;
368 }
369 if (head == qp->tail) {
370 qp->head = head;
371 break;
372 }
373 } while (1);
374 return hv_ret;
375}
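/* This poll loop is only used on the synchronous submit paths (the hash
 * digest path and the ecb/cbc/ctr routines below); it spins until the
 * hardware HEAD catches up with the TAIL that was just programmed, i.e.
 * until every queued descriptor has been consumed.
 */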
376
377static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
378 struct cwq_initial_entry *ent)
379{
380 unsigned long hv_ret = spu_queue_submit(qp, ent);
381
382 if (hv_ret == HV_EOK)
383 hv_ret = wait_for_tail(qp);
384
385 return hv_ret;
386}
387
388static int n2_hash_async_digest(struct ahash_request *req,
389 unsigned int auth_type, unsigned int digest_size,
390 unsigned int result_size, void *hash_loc)
391{
392 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
393 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
394 struct cwq_initial_entry *ent;
395 struct crypto_hash_walk walk;
396 struct spu_queue *qp;
397 unsigned long flags;
398 int err = -ENODEV;
399 int nbytes, cpu;
400
401 /* The total effective length of the operation may not
402 * exceed 2^16.
403 */
404 if (unlikely(req->nbytes > (1 << 16))) {
405 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
406
407 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
408 rctx->fallback_req.base.flags =
409 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
410 rctx->fallback_req.nbytes = req->nbytes;
411 rctx->fallback_req.src = req->src;
412 rctx->fallback_req.result = req->result;
413
414 return crypto_ahash_digest(&rctx->fallback_req);
415 }
416
417 n2_base_ctx_init(&ctx->base);
418
419 nbytes = crypto_hash_walk_first(req, &walk);
420
421 cpu = get_cpu();
422 qp = cpu_to_cwq[cpu];
423 if (!qp)
424 goto out;
425
426 spin_lock_irqsave(&qp->lock, flags);
427
428 /* XXX can do better, improve this later by doing a by-hand scatterlist
429 * XXX walk, etc.
430 */
431 ent = qp->q + qp->tail;
432
433 ent->control = control_word_base(nbytes, 0, 0,
434 auth_type, digest_size,
435 false, true, false, false,
436 OPCODE_INPLACE_BIT |
437 OPCODE_AUTH_MAC);
438 ent->src_addr = __pa(walk.data);
439 ent->auth_key_addr = 0UL;
440 ent->auth_iv_addr = __pa(hash_loc);
441 ent->final_auth_state_addr = 0UL;
442 ent->enc_key_addr = 0UL;
443 ent->enc_iv_addr = 0UL;
444 ent->dest_addr = __pa(hash_loc);
445
446 nbytes = crypto_hash_walk_done(&walk, 0);
447 while (nbytes > 0) {
448 ent = spu_queue_next(qp, ent);
449
450 ent->control = (nbytes - 1);
451 ent->src_addr = __pa(walk.data);
452 ent->auth_key_addr = 0UL;
453 ent->auth_iv_addr = 0UL;
454 ent->final_auth_state_addr = 0UL;
455 ent->enc_key_addr = 0UL;
456 ent->enc_iv_addr = 0UL;
457 ent->dest_addr = 0UL;
458
459 nbytes = crypto_hash_walk_done(&walk, 0);
460 }
461 ent->control |= CONTROL_END_OF_BLOCK;
462
463 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
464 err = -EINVAL;
465 else
466 err = 0;
467
468 spin_unlock_irqrestore(&qp->lock, flags);
469
470 if (!err)
471 memcpy(req->result, hash_loc, result_size);
472out:
473 put_cpu();
474
475 return err;
476}
477
478static int n2_md5_async_digest(struct ahash_request *req)
479{
480 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
481 struct md5_state *m = &rctx->u.md5;
482
483 if (unlikely(req->nbytes == 0)) {
484 static const char md5_zero[MD5_DIGEST_SIZE] = {
485 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
486 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
487 };
488
489 memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
490 return 0;
491 }
492 m->hash[0] = cpu_to_le32(0x67452301);
493 m->hash[1] = cpu_to_le32(0xefcdab89);
494 m->hash[2] = cpu_to_le32(0x98badcfe);
495 m->hash[3] = cpu_to_le32(0x10325476);
496
497 return n2_hash_async_digest(req, AUTH_TYPE_MD5,
498 MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
499 m->hash);
500}
501
502static int n2_sha1_async_digest(struct ahash_request *req)
503{
504 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
505 struct sha1_state *s = &rctx->u.sha1;
506
507 if (unlikely(req->nbytes == 0)) {
508 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
509 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
510 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
511 0x07, 0x09
512 };
513
514 memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
515 return 0;
516 }
517 s->state[0] = SHA1_H0;
518 s->state[1] = SHA1_H1;
519 s->state[2] = SHA1_H2;
520 s->state[3] = SHA1_H3;
521 s->state[4] = SHA1_H4;
522
523 return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
524 SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
525 s->state);
526}
527
528static int n2_sha256_async_digest(struct ahash_request *req)
529{
530 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
531 struct sha256_state *s = &rctx->u.sha256;
532
533 if (req->nbytes == 0) {
534 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
535 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
536 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
537 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
538 0x1b, 0x78, 0x52, 0xb8, 0x55
539 };
540
541 memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
542 return 0;
543 }
544 s->state[0] = SHA256_H0;
545 s->state[1] = SHA256_H1;
546 s->state[2] = SHA256_H2;
547 s->state[3] = SHA256_H3;
548 s->state[4] = SHA256_H4;
549 s->state[5] = SHA256_H5;
550 s->state[6] = SHA256_H6;
551 s->state[7] = SHA256_H7;
552
553 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
554 SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
555 s->state);
556}
557
558static int n2_sha224_async_digest(struct ahash_request *req)
559{
560 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
561 struct sha256_state *s = &rctx->u.sha256;
562
563 if (req->nbytes == 0) {
564 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
565 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
566 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
567 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
568 0x2f
569 };
570
571 memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
572 return 0;
573 }
574 s->state[0] = SHA224_H0;
575 s->state[1] = SHA224_H1;
576 s->state[2] = SHA224_H2;
577 s->state[3] = SHA224_H3;
578 s->state[4] = SHA224_H4;
579 s->state[5] = SHA224_H5;
580 s->state[6] = SHA224_H6;
581 s->state[7] = SHA224_H7;
582
583 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
584 SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
585 s->state);
586}
587
588struct n2_cipher_context {
589 int key_len;
590 int enc_type;
591 union {
592 u8 aes[AES_MAX_KEY_SIZE];
593 u8 des[DES_KEY_SIZE];
594 u8 des3[3 * DES_KEY_SIZE];
595 u8 arc4[258]; /* S-box, X, Y */
596 } key;
597};
598
599#define N2_CHUNK_ARR_LEN 16
600
601struct n2_crypto_chunk {
602 struct list_head entry;
603 unsigned long iv_paddr : 44;
604 unsigned long arr_len : 20;
605 unsigned long dest_paddr;
606 unsigned long dest_final;
607 struct {
608 unsigned long src_paddr : 44;
609 unsigned long src_len : 20;
610 } arr[N2_CHUNK_ARR_LEN];
611};
612
613struct n2_request_context {
614 struct ablkcipher_walk walk;
615 struct list_head chunk_list;
616 struct n2_crypto_chunk chunk;
617 u8 temp_iv[16];
618};
619
620/* The SPU allows some level of flexibility for partial cipher blocks
621 * being specified in a descriptor.
622 *
623 * It merely requires that every descriptor's length field is at least
624 * as large as the cipher block size. This means that a cipher block
625 * can span at most 2 descriptors. However, this does not allow a
626 * partial block to span into the final descriptor as that would
627 * violate the rule (since every descriptor's length must be at least
628 * the block size). So, for example, assuming an 8 byte block size:
629 *
630 * 0xe --> 0xa --> 0x8
631 *
632 * is a valid length sequence, whereas:
633 *
634 * 0xe --> 0xb --> 0x7
635 *
636 * is not a valid sequence.
637 */
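/* In the second sequence the final descriptor would be only 0x7 bytes,
 * smaller than the 8-byte block size, which breaks the "every descriptor is
 * at least one block" requirement; in the first sequence 0xe + 0xa lands on
 * a block boundary, so the trailing 0x8-byte descriptor is a whole block
 * and the rule holds.
 */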
638
639struct n2_cipher_alg {
640 struct list_head entry;
641 u8 enc_type;
642 struct crypto_alg alg;
643};
644
645static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
646{
647 struct crypto_alg *alg = tfm->__crt_alg;
648
649 return container_of(alg, struct n2_cipher_alg, alg);
650}
651
652struct n2_cipher_request_context {
653 struct ablkcipher_walk walk;
654};
655
656static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
657 unsigned int keylen)
658{
659 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
660 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
661 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
662
663 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
664
665 switch (keylen) {
666 case AES_KEYSIZE_128:
667 ctx->enc_type |= ENC_TYPE_ALG_AES128;
668 break;
669 case AES_KEYSIZE_192:
670 ctx->enc_type |= ENC_TYPE_ALG_AES192;
671 break;
672 case AES_KEYSIZE_256:
673 ctx->enc_type |= ENC_TYPE_ALG_AES256;
674 break;
675 default:
676 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
677 return -EINVAL;
678 }
679
680 ctx->key_len = keylen;
681 memcpy(ctx->key.aes, key, keylen);
682 return 0;
683}
684
685static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
686 unsigned int keylen)
687{
688 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
689 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
690 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
691 u32 tmp[DES_EXPKEY_WORDS];
692 int err;
693
694 ctx->enc_type = n2alg->enc_type;
695
696 if (keylen != DES_KEY_SIZE) {
697 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
698 return -EINVAL;
699 }
700
701 err = des_ekey(tmp, key);
702 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
703 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
704 return -EINVAL;
705 }
706
707 ctx->key_len = keylen;
708 memcpy(ctx->key.des, key, keylen);
709 return 0;
710}
711
712static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
713 unsigned int keylen)
714{
715 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
716 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
717 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
718
719 ctx->enc_type = n2alg->enc_type;
720
721 if (keylen != (3 * DES_KEY_SIZE)) {
722 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
723 return -EINVAL;
724 }
725 ctx->key_len = keylen;
726 memcpy(ctx->key.des3, key, keylen);
727 return 0;
728}
729
730static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
731 unsigned int keylen)
732{
733 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
734 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
735 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
736 u8 *s = ctx->key.arc4;
737 u8 *x = s + 256;
738 u8 *y = x + 1;
739 int i, j, k;
740
741 ctx->enc_type = n2alg->enc_type;
742
743 j = k = 0;
744 *x = 0;
745 *y = 0;
746 for (i = 0; i < 256; i++)
747 s[i] = i;
748 for (i = 0; i < 256; i++) {
749 u8 a = s[i];
750 j = (j + key[k] + a) & 0xff;
751 s[i] = s[j];
752 s[j] = a;
753 if (++k >= keylen)
754 k = 0;
755 }
756
757 return 0;
758}
759
760static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
761{
762 int this_len = nbytes;
763
764 this_len -= (nbytes & (block_size - 1));
765 return this_len > (1 << 16) ? (1 << 16) : this_len;
766}
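/* For example (hypothetical request sizes): with an 8-byte block size a
 * 70000-byte request is trimmed to the 65536-byte hardware limit, while a
 * 1000-byte request with a 16-byte block size yields 992 bytes, leaving the
 * 8-byte remainder for a later descriptor.
 */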
767
768static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
769 struct spu_queue *qp, bool encrypt)
770{
771 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
772 struct cwq_initial_entry *ent;
773 bool in_place;
774 int i;
775
776 ent = spu_queue_alloc(qp, cp->arr_len);
777 if (!ent) {
778 pr_info("queue_alloc() of %d fails\n",
779 cp->arr_len);
780 return -EBUSY;
781 }
782
783 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
784
785 ent->control = control_word_base(cp->arr[0].src_len,
786 0, ctx->enc_type, 0, 0,
787 false, true, false, encrypt,
788 OPCODE_ENCRYPT |
789 (in_place ? OPCODE_INPLACE_BIT : 0));
790 ent->src_addr = cp->arr[0].src_paddr;
791 ent->auth_key_addr = 0UL;
792 ent->auth_iv_addr = 0UL;
793 ent->final_auth_state_addr = 0UL;
794 ent->enc_key_addr = __pa(&ctx->key);
795 ent->enc_iv_addr = cp->iv_paddr;
796 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
797
798 for (i = 1; i < cp->arr_len; i++) {
799 ent = spu_queue_next(qp, ent);
800
801 ent->control = cp->arr[i].src_len - 1;
802 ent->src_addr = cp->arr[i].src_paddr;
803 ent->auth_key_addr = 0UL;
804 ent->auth_iv_addr = 0UL;
805 ent->final_auth_state_addr = 0UL;
806 ent->enc_key_addr = 0UL;
807 ent->enc_iv_addr = 0UL;
808 ent->dest_addr = 0UL;
809 }
810 ent->control |= CONTROL_END_OF_BLOCK;
811
812 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
813}
814
815static int n2_compute_chunks(struct ablkcipher_request *req)
816{
817 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
818 struct ablkcipher_walk *walk = &rctx->walk;
819 struct n2_crypto_chunk *chunk;
820 unsigned long dest_prev;
821 unsigned int tot_len;
822 bool prev_in_place;
823 int err, nbytes;
824
825 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
826 err = ablkcipher_walk_phys(req, walk);
827 if (err)
828 return err;
829
830 INIT_LIST_HEAD(&rctx->chunk_list);
831
832 chunk = &rctx->chunk;
833 INIT_LIST_HEAD(&chunk->entry);
834
835 chunk->iv_paddr = 0UL;
836 chunk->arr_len = 0;
837 chunk->dest_paddr = 0UL;
838
839 prev_in_place = false;
840 dest_prev = ~0UL;
841 tot_len = 0;
842
843 while ((nbytes = walk->nbytes) != 0) {
844 unsigned long dest_paddr, src_paddr;
845 bool in_place;
846 int this_len;
847
848 src_paddr = (page_to_phys(walk->src.page) +
849 walk->src.offset);
850 dest_paddr = (page_to_phys(walk->dst.page) +
851 walk->dst.offset);
852 in_place = (src_paddr == dest_paddr);
853 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
854
855 if (chunk->arr_len != 0) {
856 if (in_place != prev_in_place ||
857 (!prev_in_place &&
858 dest_paddr != dest_prev) ||
859 chunk->arr_len == N2_CHUNK_ARR_LEN ||
860 tot_len + this_len > (1 << 16)) {
861 chunk->dest_final = dest_prev;
862 list_add_tail(&chunk->entry,
863 &rctx->chunk_list);
864 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
865 if (!chunk) {
866 err = -ENOMEM;
867 break;
868 }
869 INIT_LIST_HEAD(&chunk->entry);
870 }
871 }
872 if (chunk->arr_len == 0) {
873 chunk->dest_paddr = dest_paddr;
874 tot_len = 0;
875 }
876 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
877 chunk->arr[chunk->arr_len].src_len = this_len;
878 chunk->arr_len++;
879
880 dest_prev = dest_paddr + this_len;
881 prev_in_place = in_place;
882 tot_len += this_len;
883
884 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
885 if (err)
886 break;
887 }
888 if (!err && chunk->arr_len != 0) {
889 chunk->dest_final = dest_prev;
890 list_add_tail(&chunk->entry, &rctx->chunk_list);
891 }
892
893 return err;
894}
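/* Summary of the chunking rules above: a new n2_crypto_chunk is started
 * whenever the in-place property changes, a non-in-place destination stops
 * being physically contiguous with the previous entry, the per-chunk array
 * fills up (N2_CHUNK_ARR_LEN entries), or the accumulated length would
 * exceed the 2^16-byte limit of a single control word.
 */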
895
896static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
897{
898 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
899 struct n2_crypto_chunk *c, *tmp;
900
901 if (final_iv)
902 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
903
904 ablkcipher_walk_complete(&rctx->walk);
905 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
906 list_del(&c->entry);
907 if (unlikely(c != &rctx->chunk))
908 kfree(c);
909 }
910
911}
912
913static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
914{
915 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
916 struct crypto_tfm *tfm = req->base.tfm;
917 int err = n2_compute_chunks(req);
918 struct n2_crypto_chunk *c, *tmp;
919 unsigned long flags, hv_ret;
920 struct spu_queue *qp;
921
922 if (err)
923 return err;
924
925 qp = cpu_to_cwq[get_cpu()];
926 err = -ENODEV;
927 if (!qp)
928 goto out;
929
930 spin_lock_irqsave(&qp->lock, flags);
931
932 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
933 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
934 if (err)
935 break;
936 list_del(&c->entry);
937 if (unlikely(c != &rctx->chunk))
938 kfree(c);
939 }
940 if (!err) {
941 hv_ret = wait_for_tail(qp);
942 if (hv_ret != HV_EOK)
943 err = -EINVAL;
944 }
945
946 spin_unlock_irqrestore(&qp->lock, flags);
947
948out:
949 put_cpu();
950
951 n2_chunk_complete(req, NULL);
952 return err;
953}
954
955static int n2_encrypt_ecb(struct ablkcipher_request *req)
956{
957 return n2_do_ecb(req, true);
958}
959
960static int n2_decrypt_ecb(struct ablkcipher_request *req)
961{
962 return n2_do_ecb(req, false);
963}
964
965static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
966{
967 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
968 struct crypto_tfm *tfm = req->base.tfm;
969 unsigned long flags, hv_ret, iv_paddr;
970 int err = n2_compute_chunks(req);
971 struct n2_crypto_chunk *c, *tmp;
972 struct spu_queue *qp;
973 void *final_iv_addr;
974
975 final_iv_addr = NULL;
976
977 if (err)
978 return err;
979
980 qp = cpu_to_cwq[get_cpu()];
981 err = -ENODEV;
982 if (!qp)
983 goto out;
984
985 spin_lock_irqsave(&qp->lock, flags);
986
987 if (encrypt) {
988 iv_paddr = __pa(rctx->walk.iv);
989 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
990 entry) {
991 c->iv_paddr = iv_paddr;
992 err = __n2_crypt_chunk(tfm, c, qp, true);
993 if (err)
994 break;
995 iv_paddr = c->dest_final - rctx->walk.blocksize;
996 list_del(&c->entry);
997 if (unlikely(c != &rctx->chunk))
998 kfree(c);
999 }
1000 final_iv_addr = __va(iv_paddr);
1001 } else {
1002 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1003 entry) {
1004 if (c == &rctx->chunk) {
1005 iv_paddr = __pa(rctx->walk.iv);
1006 } else {
1007 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1008 tmp->arr[tmp->arr_len-1].src_len -
1009 rctx->walk.blocksize);
1010 }
1011 if (!final_iv_addr) {
1012 unsigned long pa;
1013
1014 pa = (c->arr[c->arr_len-1].src_paddr +
1015 c->arr[c->arr_len-1].src_len -
1016 rctx->walk.blocksize);
1017 final_iv_addr = rctx->temp_iv;
1018 memcpy(rctx->temp_iv, __va(pa),
1019 rctx->walk.blocksize);
1020 }
1021 c->iv_paddr = iv_paddr;
1022 err = __n2_crypt_chunk(tfm, c, qp, false);
1023 if (err)
1024 break;
1025 list_del(&c->entry);
1026 if (unlikely(c != &rctx->chunk))
1027 kfree(c);
1028 }
1029 }
1030 if (!err) {
1031 hv_ret = wait_for_tail(qp);
1032 if (hv_ret != HV_EOK)
1033 err = -EINVAL;
1034 }
1035
1036 spin_unlock_irqrestore(&qp->lock, flags);
1037
1038out:
1039 put_cpu();
1040
1041 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1042 return err;
1043}
1044
1045static int n2_encrypt_chaining(struct ablkcipher_request *req)
1046{
1047 return n2_do_chaining(req, true);
1048}
1049
1050static int n2_decrypt_chaining(struct ablkcipher_request *req)
1051{
1052 return n2_do_chaining(req, false);
1053}
1054
1055struct n2_cipher_tmpl {
1056 const char *name;
1057 const char *drv_name;
1058 u8 block_size;
1059 u8 enc_type;
1060 struct ablkcipher_alg ablkcipher;
1061};
1062
1063static const struct n2_cipher_tmpl cipher_tmpls[] = {
1064 /* ARC4: only ECB is supported (chaining bits ignored) */
1065 { .name = "ecb(arc4)",
1066 .drv_name = "ecb-arc4",
1067 .block_size = 1,
1068 .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1069 ENC_TYPE_CHAINING_ECB),
1070 .ablkcipher = {
1071 .min_keysize = 1,
1072 .max_keysize = 256,
1073 .setkey = n2_arc4_setkey,
1074 .encrypt = n2_encrypt_ecb,
1075 .decrypt = n2_decrypt_ecb,
1076 },
1077 },
1078
1079 /* DES: ECB CBC and CFB are supported */
1080 { .name = "ecb(des)",
1081 .drv_name = "ecb-des",
1082 .block_size = DES_BLOCK_SIZE,
1083 .enc_type = (ENC_TYPE_ALG_DES |
1084 ENC_TYPE_CHAINING_ECB),
1085 .ablkcipher = {
1086 .min_keysize = DES_KEY_SIZE,
1087 .max_keysize = DES_KEY_SIZE,
1088 .setkey = n2_des_setkey,
1089 .encrypt = n2_encrypt_ecb,
1090 .decrypt = n2_decrypt_ecb,
1091 },
1092 },
1093 { .name = "cbc(des)",
1094 .drv_name = "cbc-des",
1095 .block_size = DES_BLOCK_SIZE,
1096 .enc_type = (ENC_TYPE_ALG_DES |
1097 ENC_TYPE_CHAINING_CBC),
1098 .ablkcipher = {
1099 .ivsize = DES_BLOCK_SIZE,
1100 .min_keysize = DES_KEY_SIZE,
1101 .max_keysize = DES_KEY_SIZE,
1102 .setkey = n2_des_setkey,
1103 .encrypt = n2_encrypt_chaining,
1104 .decrypt = n2_decrypt_chaining,
1105 },
1106 },
1107 { .name = "cfb(des)",
1108 .drv_name = "cfb-des",
1109 .block_size = DES_BLOCK_SIZE,
1110 .enc_type = (ENC_TYPE_ALG_DES |
1111 ENC_TYPE_CHAINING_CFB),
1112 .ablkcipher = {
1113 .min_keysize = DES_KEY_SIZE,
1114 .max_keysize = DES_KEY_SIZE,
1115 .setkey = n2_des_setkey,
1116 .encrypt = n2_encrypt_chaining,
1117 .decrypt = n2_decrypt_chaining,
1118 },
1119 },
1120
1121 /* 3DES: ECB CBC and CFB are supported */
1122 { .name = "ecb(des3_ede)",
1123 .drv_name = "ecb-3des",
1124 .block_size = DES_BLOCK_SIZE,
1125 .enc_type = (ENC_TYPE_ALG_3DES |
1126 ENC_TYPE_CHAINING_ECB),
1127 .ablkcipher = {
1128 .min_keysize = 3 * DES_KEY_SIZE,
1129 .max_keysize = 3 * DES_KEY_SIZE,
1130 .setkey = n2_3des_setkey,
1131 .encrypt = n2_encrypt_ecb,
1132 .decrypt = n2_decrypt_ecb,
1133 },
1134 },
1135 { .name = "cbc(des3_ede)",
1136 .drv_name = "cbc-3des",
1137 .block_size = DES_BLOCK_SIZE,
1138 .enc_type = (ENC_TYPE_ALG_3DES |
1139 ENC_TYPE_CHAINING_CBC),
1140 .ablkcipher = {
1141 .ivsize = DES_BLOCK_SIZE,
1142 .min_keysize = 3 * DES_KEY_SIZE,
1143 .max_keysize = 3 * DES_KEY_SIZE,
1144 .setkey = n2_3des_setkey,
1145 .encrypt = n2_encrypt_chaining,
1146 .decrypt = n2_decrypt_chaining,
1147 },
1148 },
1149 { .name = "cfb(des3_ede)",
1150 .drv_name = "cfb-3des",
1151 .block_size = DES_BLOCK_SIZE,
1152 .enc_type = (ENC_TYPE_ALG_3DES |
1153 ENC_TYPE_CHAINING_CFB),
1154 .ablkcipher = {
1155 .min_keysize = 3 * DES_KEY_SIZE,
1156 .max_keysize = 3 * DES_KEY_SIZE,
1157 .setkey = n2_3des_setkey,
1158 .encrypt = n2_encrypt_chaining,
1159 .decrypt = n2_decrypt_chaining,
1160 },
1161 },
1162 /* AES: ECB CBC and CTR are supported */
1163 { .name = "ecb(aes)",
1164 .drv_name = "ecb-aes",
1165 .block_size = AES_BLOCK_SIZE,
1166 .enc_type = (ENC_TYPE_ALG_AES128 |
1167 ENC_TYPE_CHAINING_ECB),
1168 .ablkcipher = {
1169 .min_keysize = AES_MIN_KEY_SIZE,
1170 .max_keysize = AES_MAX_KEY_SIZE,
1171 .setkey = n2_aes_setkey,
1172 .encrypt = n2_encrypt_ecb,
1173 .decrypt = n2_decrypt_ecb,
1174 },
1175 },
1176 { .name = "cbc(aes)",
1177 .drv_name = "cbc-aes",
1178 .block_size = AES_BLOCK_SIZE,
1179 .enc_type = (ENC_TYPE_ALG_AES128 |
1180 ENC_TYPE_CHAINING_CBC),
1181 .ablkcipher = {
1182 .ivsize = AES_BLOCK_SIZE,
1183 .min_keysize = AES_MIN_KEY_SIZE,
1184 .max_keysize = AES_MAX_KEY_SIZE,
1185 .setkey = n2_aes_setkey,
1186 .encrypt = n2_encrypt_chaining,
1187 .decrypt = n2_decrypt_chaining,
1188 },
1189 },
1190 { .name = "ctr(aes)",
1191 .drv_name = "ctr-aes",
1192 .block_size = AES_BLOCK_SIZE,
1193 .enc_type = (ENC_TYPE_ALG_AES128 |
1194 ENC_TYPE_CHAINING_COUNTER),
1195 .ablkcipher = {
1196 .ivsize = AES_BLOCK_SIZE,
1197 .min_keysize = AES_MIN_KEY_SIZE,
1198 .max_keysize = AES_MAX_KEY_SIZE,
1199 .setkey = n2_aes_setkey,
1200 .encrypt = n2_encrypt_chaining,
1201 .decrypt = n2_encrypt_chaining,
1202 },
1203 },
1204
1205};
1206#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1207
1208static LIST_HEAD(cipher_algs);
1209
1210struct n2_hash_tmpl {
1211 const char *name;
1212 int (*digest)(struct ahash_request *req);
1213 u8 digest_size;
1214 u8 block_size;
1215};
1216static const struct n2_hash_tmpl hash_tmpls[] = {
1217 { .name = "md5",
1218 .digest = n2_md5_async_digest,
1219 .digest_size = MD5_DIGEST_SIZE,
1220 .block_size = MD5_HMAC_BLOCK_SIZE },
1221 { .name = "sha1",
1222 .digest = n2_sha1_async_digest,
1223 .digest_size = SHA1_DIGEST_SIZE,
1224 .block_size = SHA1_BLOCK_SIZE },
1225 { .name = "sha256",
1226 .digest = n2_sha256_async_digest,
1227 .digest_size = SHA256_DIGEST_SIZE,
1228 .block_size = SHA256_BLOCK_SIZE },
1229 { .name = "sha224",
1230 .digest = n2_sha224_async_digest,
1231 .digest_size = SHA224_DIGEST_SIZE,
1232 .block_size = SHA224_BLOCK_SIZE },
1233};
1234#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1235
1236struct n2_ahash_alg {
1237 struct list_head entry;
1238 struct ahash_alg alg;
1239};
1240static LIST_HEAD(ahash_algs);
1241
1242static int algs_registered;
1243
1244static void __n2_unregister_algs(void)
1245{
1246 struct n2_cipher_alg *cipher, *cipher_tmp;
1247 struct n2_ahash_alg *alg, *alg_tmp;
1248
1249 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1250 crypto_unregister_alg(&cipher->alg);
1251 list_del(&cipher->entry);
1252 kfree(cipher);
1253 }
1254 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1255 crypto_unregister_ahash(&alg->alg);
1256 list_del(&alg->entry);
1257 kfree(alg);
1258 }
1259}
1260
1261static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1262{
1263 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1264 return 0;
1265}
1266
1267static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1268{
1269 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1270 struct crypto_alg *alg;
1271 int err;
1272
1273 if (!p)
1274 return -ENOMEM;
1275
1276 alg = &p->alg;
1277
1278 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1279 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1280 alg->cra_priority = N2_CRA_PRIORITY;
1281 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1282 alg->cra_blocksize = tmpl->block_size;
1283 p->enc_type = tmpl->enc_type;
1284 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1285 alg->cra_type = &crypto_ablkcipher_type;
1286 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1287 alg->cra_init = n2_cipher_cra_init;
1288 alg->cra_module = THIS_MODULE;
1289
1290 list_add(&p->entry, &cipher_algs);
1291 err = crypto_register_alg(alg);
1292 if (err) {
1293 list_del(&p->entry);
1294 kfree(p);
1295 }
1296 return err;
1297}
1298
1299static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1300{
1301 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1302 struct hash_alg_common *halg;
1303 struct crypto_alg *base;
1304 struct ahash_alg *ahash;
1305 int err;
1306
1307 if (!p)
1308 return -ENOMEM;
1309
1310 ahash = &p->alg;
1311 ahash->init = n2_hash_async_init;
1312 ahash->update = n2_hash_async_update;
1313 ahash->final = n2_hash_async_final;
1314 ahash->finup = n2_hash_async_finup;
1315 ahash->digest = tmpl->digest;
1316
1317 halg = &ahash->halg;
1318 halg->digestsize = tmpl->digest_size;
1319
1320 base = &halg->base;
1321 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1322 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1323 base->cra_priority = N2_CRA_PRIORITY;
1324 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
1325 base->cra_blocksize = tmpl->block_size;
1326 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1327 base->cra_module = THIS_MODULE;
1328 base->cra_init = n2_hash_cra_init;
1329 base->cra_exit = n2_hash_cra_exit;
1330
1331 list_add(&p->entry, &ahash_algs);
1332 err = crypto_register_ahash(ahash);
1333 if (err) {
1334 list_del(&p->entry);
1335 kfree(p);
1336 }
1337 return err;
1338}
1339
1340static int __devinit n2_register_algs(void)
1341{
1342 int i, err = 0;
1343
1344 mutex_lock(&spu_lock);
1345 if (algs_registered++)
1346 goto out;
1347
1348 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1349 err = __n2_register_one_ahash(&hash_tmpls[i]);
1350 if (err) {
1351 __n2_unregister_algs();
1352 goto out;
1353 }
1354 }
1355 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1356 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1357 if (err) {
1358 __n2_unregister_algs();
1359 goto out;
1360 }
1361 }
1362
1363out:
1364 mutex_unlock(&spu_lock);
1365 return err;
1366}
1367
1368static void __exit n2_unregister_algs(void)
1369{
1370 mutex_lock(&spu_lock);
1371 if (!--algs_registered)
1372 __n2_unregister_algs();
1373 mutex_unlock(&spu_lock);
1374}
1375
1376/* To map CWQ queues to interrupt sources, the hypervisor API provides
1377 * a devino. This isn't very useful to us because all of the
1378 * interrupts listed in the of_device node have been translated to
1379 * Linux virtual IRQ cookie numbers.
1380 *
1381 * So we have to back-translate, going through the 'intr' and 'ino'
1382 * property tables of the n2cp MDESC node, matching it with the OF
1383 * 'interrupts' property entries, in order to figure out which
1384 * devino goes to which already-translated IRQ.
1385 */
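/* Worked example (made-up numbers): if the MDESC 'ino' table has
 * ino_table[2].ino == dev_ino with ino_table[2].intr == 1, and the OF
 * "interrupts" property reads <0 1 2>, then find_devino_index() returns 1
 * and spu_map_ino() requests dev->irqs[1].
 */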
1386static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
1387 unsigned long dev_ino)
1388{
1389 const unsigned int *dev_intrs;
1390 unsigned int intr;
1391 int i;
1392
1393 for (i = 0; i < ip->num_intrs; i++) {
1394 if (ip->ino_table[i].ino == dev_ino)
1395 break;
1396 }
1397 if (i == ip->num_intrs)
1398 return -ENODEV;
1399
1400 intr = ip->ino_table[i].intr;
1401
1402 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1403 if (!dev_intrs)
1404 return -ENODEV;
1405
1406 for (i = 0; i < dev->num_irqs; i++) {
1407 if (dev_intrs[i] == intr)
1408 return i;
1409 }
1410
1411 return -ENODEV;
1412}
1413
1414static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
1415 const char *irq_name, struct spu_queue *p,
1416 irq_handler_t handler)
1417{
1418 unsigned long herr;
1419 int index;
1420
1421 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1422 if (herr)
1423 return -EINVAL;
1424
1425 index = find_devino_index(dev, ip, p->devino);
1426 if (index < 0)
1427 return index;
1428
1429 p->irq = dev->irqs[index];
1430
1431 sprintf(p->irq_name, "%s-%d", irq_name, index);
1432
1433 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
1434 p->irq_name, p);
1435}
1436
1437static struct kmem_cache *queue_cache[2];
1438
1439static void *new_queue(unsigned long q_type)
1440{
1441 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1442}
1443
1444static void free_queue(void *p, unsigned long q_type)
1445{
1446 return kmem_cache_free(queue_cache[q_type - 1], p);
1447}
1448
1449static int queue_cache_init(void)
1450{
1451 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1452 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1453 kmem_cache_create("mau_queue",
1454 (MAU_NUM_ENTRIES *
1455 MAU_ENTRY_SIZE),
1456 MAU_ENTRY_SIZE, 0, NULL);
1457 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1458 return -ENOMEM;
1459
1460 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1461 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1462 kmem_cache_create("cwq_queue",
1463 (CWQ_NUM_ENTRIES *
1464 CWQ_ENTRY_SIZE),
1465 CWQ_ENTRY_SIZE, 0, NULL);
1466 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1467 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1468 return -ENOMEM;
1469 }
1470 return 0;
1471}
1472
1473static void queue_cache_destroy(void)
1474{
1475 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1476 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1477}
1478
1479static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1480{
1481 cpumask_var_t old_allowed;
1482 unsigned long hv_ret;
1483
1484 if (cpumask_empty(&p->sharing))
1485 return -EINVAL;
1486
1487 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1488 return -ENOMEM;
1489
1490 cpumask_copy(old_allowed, &current->cpus_allowed);
1491
1492 set_cpus_allowed_ptr(current, &p->sharing);
1493
1494 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1495 CWQ_NUM_ENTRIES, &p->qhandle);
1496 if (!hv_ret)
1497 sun4v_ncs_sethead_marker(p->qhandle, 0);
1498
1499 set_cpus_allowed_ptr(current, old_allowed);
1500
1501 free_cpumask_var(old_allowed);
1502
1503 return (hv_ret ? -EINVAL : 0);
1504}
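/* The temporary cpus_allowed switch above is there so that the
 * sun4v_ncs_qconf() hypercall is issued from one of the CPUs that actually
 * share this SPU (the hypervisor associates the queue with the submitting
 * CPU's crypto unit); the caller's original affinity mask is then restored.
 */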
1505
1506static int spu_queue_setup(struct spu_queue *p)
1507{
1508 int err;
1509
1510 p->q = new_queue(p->q_type);
1511 if (!p->q)
1512 return -ENOMEM;
1513
1514 err = spu_queue_register(p, p->q_type);
1515 if (err) {
1516 free_queue(p->q, p->q_type);
1517 p->q = NULL;
1518 }
1519
1520 return err;
1521}
1522
1523static void spu_queue_destroy(struct spu_queue *p)
1524{
1525 unsigned long hv_ret;
1526
1527 if (!p->q)
1528 return;
1529
1530 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1531
1532 if (!hv_ret)
1533 free_queue(p->q, p->q_type);
1534}
1535
1536static void spu_list_destroy(struct list_head *list)
1537{
1538 struct spu_queue *p, *n;
1539
1540 list_for_each_entry_safe(p, n, list, list) {
1541 int i;
1542
1543 for (i = 0; i < NR_CPUS; i++) {
1544 if (cpu_to_cwq[i] == p)
1545 cpu_to_cwq[i] = NULL;
1546 }
1547
1548 if (p->irq) {
1549 free_irq(p->irq, p);
1550 p->irq = 0;
1551 }
1552 spu_queue_destroy(p);
1553 list_del(&p->list);
1554 kfree(p);
1555 }
1556}
1557
1558/* Walk the backward arcs of a CWQ 'exec-unit' node,
1559 * gathering cpu membership information.
1560 */
1561static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1562 struct of_device *dev,
1563 u64 node, struct spu_queue *p,
1564 struct spu_queue **table)
1565{
1566 u64 arc;
1567
1568 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1569 u64 tgt = mdesc_arc_target(mdesc, arc);
1570 const char *name = mdesc_node_name(mdesc, tgt);
1571 const u64 *id;
1572
1573 if (strcmp(name, "cpu"))
1574 continue;
1575 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1576 if (table[*id] != NULL) {
1577 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
1578 dev->dev.of_node->full_name);
1579 return -EINVAL;
1580 }
1581 cpu_set(*id, p->sharing);
1582 table[*id] = p;
1583 }
1584 return 0;
1585}
1586
1587/* Process an 'exec-unit' MDESC node of type 'cwq'. */
1588static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1589 struct of_device *dev, struct mdesc_handle *mdesc,
1590 u64 node, const char *iname, unsigned long q_type,
1591 irq_handler_t handler, struct spu_queue **table)
1592{
1593 struct spu_queue *p;
1594 int err;
1595
1596 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1597 if (!p) {
1598 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
1599 dev->dev.of_node->full_name);
1600 return -ENOMEM;
1601 }
1602
1603 cpus_clear(p->sharing);
1604 spin_lock_init(&p->lock);
1605 p->q_type = q_type;
1606 INIT_LIST_HEAD(&p->jobs);
1607 list_add(&p->list, list);
1608
1609 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1610 if (err)
1611 return err;
1612
1613 err = spu_queue_setup(p);
1614 if (err)
1615 return err;
1616
1617 return spu_map_ino(dev, ip, iname, p, handler);
1618}
1619
1620static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
1621 struct spu_mdesc_info *ip, struct list_head *list,
1622 const char *exec_name, unsigned long q_type,
1623 irq_handler_t handler, struct spu_queue **table)
1624{
1625 int err = 0;
1626 u64 node;
1627
1628 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1629 const char *type;
1630
1631 type = mdesc_get_property(mdesc, node, "type", NULL);
1632 if (!type || strcmp(type, exec_name))
1633 continue;
1634
1635 err = handle_exec_unit(ip, list, dev, mdesc, node,
1636 exec_name, q_type, handler, table);
1637 if (err) {
1638 spu_list_destroy(list);
1639 break;
1640 }
1641 }
1642
1643 return err;
1644}
1645
1646static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1647 struct spu_mdesc_info *ip)
1648{
1649 const u64 *intr, *ino;
1650 int intr_len, ino_len;
1651 int i;
1652
1653 intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
1654 if (!intr)
1655 return -ENODEV;
1656
1657 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1658 if (!ino)
1659 return -ENODEV;
1660
1661 if (intr_len != ino_len)
1662 return -EINVAL;
1663
1664 ip->num_intrs = intr_len / sizeof(u64);
1665 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1666 ip->num_intrs),
1667 GFP_KERNEL);
1668 if (!ip->ino_table)
1669 return -ENOMEM;
1670
1671 for (i = 0; i < ip->num_intrs; i++) {
1672 struct ino_blob *b = &ip->ino_table[i];
1673 b->intr = intr[i];
1674 b->ino = ino[i];
1675 }
1676
1677 return 0;
1678}
1679
1680static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1681 struct of_device *dev,
1682 struct spu_mdesc_info *ip,
1683 const char *node_name)
1684{
1685 const unsigned int *reg;
1686 u64 node;
1687
1688 reg = of_get_property(dev->dev.of_node, "reg", NULL);
1689 if (!reg)
1690 return -ENODEV;
1691
1692 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1693 const char *name;
1694 const u64 *chdl;
1695
1696 name = mdesc_get_property(mdesc, node, "name", NULL);
1697 if (!name || strcmp(name, node_name))
1698 continue;
1699 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1700 if (!chdl || (*chdl != *reg))
1701 continue;
1702 ip->cfg_handle = *chdl;
1703 return get_irq_props(mdesc, node, ip);
1704 }
1705
1706 return -ENODEV;
1707}
1708
1709static unsigned long n2_spu_hvapi_major;
1710static unsigned long n2_spu_hvapi_minor;
1711
1712static int __devinit n2_spu_hvapi_register(void)
1713{
1714 int err;
1715
1716 n2_spu_hvapi_major = 2;
1717 n2_spu_hvapi_minor = 0;
1718
1719 err = sun4v_hvapi_register(HV_GRP_NCS,
1720 n2_spu_hvapi_major,
1721 &n2_spu_hvapi_minor);
1722
1723 if (!err)
1724 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1725 n2_spu_hvapi_major,
1726 n2_spu_hvapi_minor);
1727
1728 return err;
1729}
1730
1731static void n2_spu_hvapi_unregister(void)
1732{
1733 sun4v_hvapi_unregister(HV_GRP_NCS);
1734}
1735
1736static int global_ref;
1737
1738static int __devinit grab_global_resources(void)
1739{
1740 int err = 0;
1741
1742 mutex_lock(&spu_lock);
1743
1744 if (global_ref++)
1745 goto out;
1746
1747 err = n2_spu_hvapi_register();
1748 if (err)
1749 goto out;
1750
1751 err = queue_cache_init();
1752 if (err)
1753 goto out_hvapi_release;
1754
1755 err = -ENOMEM;
1756 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1757 GFP_KERNEL);
1758 if (!cpu_to_cwq)
1759 goto out_queue_cache_destroy;
1760
1761 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1762 GFP_KERNEL);
1763 if (!cpu_to_mau)
1764 goto out_free_cwq_table;
1765
1766 err = 0;
1767
1768out:
1769 if (err)
1770 global_ref--;
1771 mutex_unlock(&spu_lock);
1772 return err;
1773
1774out_free_cwq_table:
1775 kfree(cpu_to_cwq);
1776 cpu_to_cwq = NULL;
1777
1778out_queue_cache_destroy:
1779 queue_cache_destroy();
1780
1781out_hvapi_release:
1782 n2_spu_hvapi_unregister();
1783 goto out;
1784}
1785
1786static void release_global_resources(void)
1787{
1788 mutex_lock(&spu_lock);
1789 if (!--global_ref) {
1790 kfree(cpu_to_cwq);
1791 cpu_to_cwq = NULL;
1792
1793 kfree(cpu_to_mau);
1794 cpu_to_mau = NULL;
1795
1796 queue_cache_destroy();
1797 n2_spu_hvapi_unregister();
1798 }
1799 mutex_unlock(&spu_lock);
1800}
1801
1802static struct n2_crypto * __devinit alloc_n2cp(void)
1803{
1804 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1805
1806 if (np)
1807 INIT_LIST_HEAD(&np->cwq_list);
1808
1809 return np;
1810}
1811
1812static void free_n2cp(struct n2_crypto *np)
1813{
1814 if (np->cwq_info.ino_table) {
1815 kfree(np->cwq_info.ino_table);
1816 np->cwq_info.ino_table = NULL;
1817 }
1818
1819 kfree(np);
1820}
1821
1822static void __devinit n2_spu_driver_version(void)
1823{
1824 static int n2_spu_version_printed;
1825
1826 if (n2_spu_version_printed++ == 0)
1827 pr_info("%s", version);
1828}
1829
1830static int __devinit n2_crypto_probe(struct of_device *dev,
1831 const struct of_device_id *match)
1832{
1833 struct mdesc_handle *mdesc;
1834 const char *full_name;
1835 struct n2_crypto *np;
1836 int err;
1837
1838 n2_spu_driver_version();
1839
1840 full_name = dev->dev.of_node->full_name;
1841 pr_info("Found N2CP at %s\n", full_name);
1842
1843 np = alloc_n2cp();
1844 if (!np) {
1845 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
1846 full_name);
1847 return -ENOMEM;
1848 }
1849
1850 err = grab_global_resources();
1851 if (err) {
1852 dev_err(&dev->dev, "%s: Unable to grab "
1853 "global resources.\n", full_name);
1854 goto out_free_n2cp;
1855 }
1856
1857 mdesc = mdesc_grab();
1858
1859 if (!mdesc) {
1860 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1861 full_name);
1862 err = -ENODEV;
1863 goto out_free_global;
1864 }
1865 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1866 if (err) {
1867 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1868 full_name);
1869 mdesc_release(mdesc);
1870 goto out_free_global;
1871 }
1872
1873 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1874 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1875 cpu_to_cwq);
1876 mdesc_release(mdesc);
1877
1878 if (err) {
1879 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
1880 full_name);
1881 goto out_free_global;
1882 }
1883
1884 err = n2_register_algs();
1885 if (err) {
1886 dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
1887 full_name);
1888 goto out_free_spu_list;
1889 }
1890
1891 dev_set_drvdata(&dev->dev, np);
1892
1893 return 0;
1894
1895out_free_spu_list:
1896 spu_list_destroy(&np->cwq_list);
1897
1898out_free_global:
1899 release_global_resources();
1900
1901out_free_n2cp:
1902 free_n2cp(np);
1903
1904 return err;
1905}
1906
1907static int __devexit n2_crypto_remove(struct of_device *dev)
1908{
1909 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
1910
1911 n2_unregister_algs();
1912
1913 spu_list_destroy(&np->cwq_list);
1914
1915 release_global_resources();
1916
1917 free_n2cp(np);
1918
1919 return 0;
1920}
1921
1922static struct n2_mau * __devinit alloc_ncp(void)
1923{
1924 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
1925
1926 if (mp)
1927 INIT_LIST_HEAD(&mp->mau_list);
1928
1929 return mp;
1930}
1931
1932static void free_ncp(struct n2_mau *mp)
1933{
1934 if (mp->mau_info.ino_table) {
1935 kfree(mp->mau_info.ino_table);
1936 mp->mau_info.ino_table = NULL;
1937 }
1938
1939 kfree(mp);
1940}
1941
1942static int __devinit n2_mau_probe(struct of_device *dev,
1943 const struct of_device_id *match)
1944{
1945 struct mdesc_handle *mdesc;
1946 const char *full_name;
1947 struct n2_mau *mp;
1948 int err;
1949
1950 n2_spu_driver_version();
1951
1952 full_name = dev->dev.of_node->full_name;
1953 pr_info("Found NCP at %s\n", full_name);
1954
1955 mp = alloc_ncp();
1956 if (!mp) {
1957 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
1958 full_name);
1959 return -ENOMEM;
1960 }
1961
1962 err = grab_global_resources();
1963 if (err) {
1964 dev_err(&dev->dev, "%s: Unable to grab "
1965 "global resources.\n", full_name);
1966 goto out_free_ncp;
1967 }
1968
1969 mdesc = mdesc_grab();
1970
1971 if (!mdesc) {
1972 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1973 full_name);
1974 err = -ENODEV;
1975 goto out_free_global;
1976 }
1977
1978 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
1979 if (err) {
1980 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1981 full_name);
1982 mdesc_release(mdesc);
1983 goto out_free_global;
1984 }
1985
1986 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
1987 "mau", HV_NCS_QTYPE_MAU, mau_intr,
1988 cpu_to_mau);
1989 mdesc_release(mdesc);
1990
1991 if (err) {
1992 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
1993 full_name);
1994 goto out_free_global;
1995 }
1996
1997 dev_set_drvdata(&dev->dev, mp);
1998
1999 return 0;
2000
2001out_free_global:
2002 release_global_resources();
2003
2004out_free_ncp:
2005 free_ncp(mp);
2006
2007 return err;
2008}
2009
2010static int __devexit n2_mau_remove(struct of_device *dev)
2011{
2012 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2013
2014 spu_list_destroy(&mp->mau_list);
2015
2016 release_global_resources();
2017
2018 free_ncp(mp);
2019
2020 return 0;
2021}
2022
2023static struct of_device_id n2_crypto_match[] = {
2024 {
2025 .name = "n2cp",
2026 .compatible = "SUNW,n2-cwq",
2027 },
2028 {
2029 .name = "n2cp",
2030 .compatible = "SUNW,vf-cwq",
2031 },
2032 {},
2033};
2034
2035MODULE_DEVICE_TABLE(of, n2_crypto_match);
2036
2037static struct of_platform_driver n2_crypto_driver = {
2038 .driver = {
2039 .name = "n2cp",
2040 .owner = THIS_MODULE,
2041 .of_match_table = n2_crypto_match,
2042 },
2043 .probe = n2_crypto_probe,
2044 .remove = __devexit_p(n2_crypto_remove),
2045};
2046
2047static struct of_device_id n2_mau_match[] = {
2048 {
2049 .name = "ncp",
2050 .compatible = "SUNW,n2-mau",
2051 },
2052 {
2053 .name = "ncp",
2054 .compatible = "SUNW,vf-mau",
2055 },
2056 {},
2057};
2058
2059MODULE_DEVICE_TABLE(of, n2_mau_match);
2060
2061static struct of_platform_driver n2_mau_driver = {
2062 .driver = {
2063 .name = "ncp",
2064 .owner = THIS_MODULE,
2065 .of_match_table = n2_mau_match,
2066 },
2067 .probe = n2_mau_probe,
2068 .remove = __devexit_p(n2_mau_remove),
2069};
2070
2071static int __init n2_init(void)
2072{
2073 int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
2074
2075 if (!err) {
2076 err = of_register_driver(&n2_mau_driver, &of_bus_type);
2077 if (err)
2078 of_unregister_driver(&n2_crypto_driver);
2079 }
2080 return err;
2081}
2082
2083static void __exit n2_exit(void)
2084{
2085 of_unregister_driver(&n2_mau_driver);
2086 of_unregister_driver(&n2_crypto_driver);
2087}
2088
2089module_init(n2_init);
2090module_exit(n2_exit);