blob: afd136b45f49b1fa0a933cd4a33412f830298bcb [file] [log] [blame]
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/cpumask.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15#include <linux/crypto.h>
16#include <crypto/md5.h>
17#include <crypto/sha.h>
18#include <crypto/aes.h>
19#include <crypto/des.h>
20#include <linux/mutex.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23
24#include <crypto/internal/hash.h>
25#include <crypto/scatterwalk.h>
26#include <crypto/algapi.h>
27
28#include <asm/hypervisor.h>
29#include <asm/mdesc.h>
30
31#include "n2_core.h"
32
33#define DRV_MODULE_NAME "n2_crypto"
David S. Millereb7caf32011-07-28 01:30:07 -070034#define DRV_MODULE_VERSION "0.2"
35#define DRV_MODULE_RELDATE "July 28, 2011"
David S. Miller0a625fd2010-05-19 14:14:04 +100036
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -080037static char version[] =
David S. Miller0a625fd2010-05-19 14:14:04 +100038 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41MODULE_DESCRIPTION("Niagara2 Crypto driver");
42MODULE_LICENSE("GPL");
43MODULE_VERSION(DRV_MODULE_VERSION);
44
David S. Miller10803622012-09-15 09:06:30 -070045#define N2_CRA_PRIORITY 200
David S. Miller0a625fd2010-05-19 14:14:04 +100046
47static DEFINE_MUTEX(spu_lock);
48
/* Software view of one hardware work queue (either a Modular
 * Arithmetic Unit queue or a Control Word Queue).
 */
struct spu_queue {
	cpumask_t sharing;		/* CPUs that share this queue */
	unsigned long qhandle;		/* hypervisor queue handle */

	spinlock_t lock;		/* protects head/tail/jobs and ring contents */
	u8 q_type;			/* HV_NCS_QTYPE_{MAU,CWQ} */
	void *q;			/* base address of the queue ring */
	unsigned long head;		/* byte offset of next-to-complete entry */
	unsigned long tail;		/* byte offset of next-to-fill entry */
	struct list_head jobs;

	unsigned long devino;		/* device interrupt number */

	char irq_name[32];
	unsigned int irq;

	struct list_head list;
};
67
/* Per-cpu lookup tables mapping a cpu to the queue it submits on. */
static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;
70
71static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72{
73 if (q->q_type == HV_NCS_QTYPE_MAU) {
74 off += MAU_ENTRY_SIZE;
75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76 off = 0;
77 } else {
78 off += CWQ_ENTRY_SIZE;
79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80 off = 0;
81 }
82 return off;
83}
84
/* State common to all queued async jobs. */
struct n2_request_common {
	struct list_head entry;
	unsigned int offset;		/* ring tail offset used by this job,
					 * or OFFSET_NOT_RUNNING */
};
#define OFFSET_NOT_RUNNING (~(unsigned int)0)
90
/* An async job request records the final tail value it used in
 * n2_request_common->offset; report whether that offset lies in the
 * half-open interval (old_head, new_head], taking ring wrap-around
 * into account.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	bool wrapped = old_head > new_head;

	if (!wrapped)
		return offset > old_head && offset <= new_head;

	return offset > old_head || offset <= new_head;
}
107
/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	/* NOTE(review): these trace messages are emitted at pr_err
	 * level but read like debug output — consider pr_debug.
	 */
	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	/* Walk every entry completed since the last interrupt. */
	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	/* Advancing the HEAD marker is what clears the interrupt;
	 * only mirror the new head in software if that succeeded.
	 */
	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}
140
/* MAU interrupt handler: like cwq_intr(), clearing the interrupt by
 * re-syncing the HEAD marker with the actual head.  No per-entry
 * completion processing is done here.
 */
static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	/* NOTE(review): debug-style traces at pr_err level. */
	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}
162
163static void *spu_queue_next(struct spu_queue *q, void *cur)
164{
165 return q->q + spu_next_offset(q, cur - q->q);
166}
167
168static int spu_queue_num_free(struct spu_queue *q)
169{
170 unsigned long head = q->head;
171 unsigned long tail = q->tail;
172 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173 unsigned long diff;
174
175 if (head > tail)
176 diff = head - tail;
177 else
178 diff = (end - tail) + head;
179
180 return (diff / CWQ_ENTRY_SIZE) - 1;
181}
182
183static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184{
185 int avail = spu_queue_num_free(q);
186
187 if (avail >= num_entries)
188 return q->q + q->tail;
189
190 return NULL;
191}
192
/* Publish all entries up to and including @last by advancing the
 * hardware tail pointer; mirror the new tail in software on success.
 */
static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}
204
205static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206 int enc_type, int auth_type,
207 unsigned int hash_len,
208 bool sfas, bool sob, bool eob, bool encrypt,
209 int opcode)
210{
211 u64 word = (len - 1) & CONTROL_LEN;
212
213 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216 if (sfas)
217 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218 if (sob)
219 word |= CONTROL_START_OF_BLOCK;
220 if (eob)
221 word |= CONTROL_END_OF_BLOCK;
222 if (encrypt)
223 word |= CONTROL_ENCRYPT;
224 if (hmac_key_len)
225 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226 if (hash_len)
227 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228
229 return word;
230}
231
#if 0
/* Heuristic for deciding whether an operation should go through the
 * async path; currently compiled out and unused.
 */
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif
241
/* Driver wrapper around an ahash_alg carrying the per-algorithm
 * constants the hardware needs.
 */
struct n2_ahash_alg {
	struct list_head entry;
	const char *hash_zero;		/* digest of the empty message */
	const u32 *hash_init;		/* initial hash state */
	u8 hw_op_hashsz;		/* size of the state the HW operates on */
	u8 digest_size;
	u8 auth_type;			/* control-word auth type for plain hash */
	u8 hmac_type;			/* control-word auth type for HMAC */
	struct ahash_alg alg;
};
252
253static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
254{
255 struct crypto_alg *alg = tfm->__crt_alg;
256 struct ahash_alg *ahash_alg;
257
258 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
259
260 return container_of(ahash_alg, struct n2_ahash_alg, alg);
261}
262
/* HMAC variant of n2_ahash_alg; @child_alg names the synchronous
 * hash used to digest over-long keys.
 */
struct n2_hmac_alg {
	const char *child_alg;
	struct n2_ahash_alg derived;
};
267
268static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
269{
270 struct crypto_alg *alg = tfm->__crt_alg;
271 struct ahash_alg *ahash_alg;
272
273 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
274
275 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
276}
277
/* Transform context for plain hashes: only the software fallback. */
struct n2_hash_ctx {
	struct crypto_ahash *fallback_tfm;
};
David S. Miller0a625fd2010-05-19 14:14:04 +1000281
#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */

/* Transform context for HMAC.  @base must stay the first member:
 * the fallback path accesses this context through crypto_ahash_ctx()
 * cast to struct n2_hash_ctx (see n2_hmac_async_digest).
 */
struct n2_hmac_ctx {
	struct n2_hash_ctx base;

	struct crypto_shash *child_shash;	/* digests over-long keys */

	int hash_key_len;
	unsigned char hash_key[N2_HASH_KEY_MAX];
};
292
/* Per-request hash context: hardware hash state (one arm of the
 * union per supported algorithm) plus the embedded fallback request
 * (variable-sized, so it must come last — see the reqsize setup in
 * n2_hash_cra_init).
 */
struct n2_hash_req_ctx {
	union {
		struct md5_state md5;
		struct sha1_state sha1;
		struct sha256_state sha256;
	} u;

	struct ahash_request fallback_req;
};
302
303static int n2_hash_async_init(struct ahash_request *req)
304{
David S. Millerc9aa55e2010-05-22 01:09:04 -0700305 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000306 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
307 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
308
David S. Millerc9aa55e2010-05-22 01:09:04 -0700309 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
310 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
David S. Miller0a625fd2010-05-19 14:14:04 +1000311
David S. Millerc9aa55e2010-05-22 01:09:04 -0700312 return crypto_ahash_init(&rctx->fallback_req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000313}
314
315static int n2_hash_async_update(struct ahash_request *req)
316{
David S. Millerc9aa55e2010-05-22 01:09:04 -0700317 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000318 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
320
David S. Millerc9aa55e2010-05-22 01:09:04 -0700321 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
322 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
323 rctx->fallback_req.nbytes = req->nbytes;
324 rctx->fallback_req.src = req->src;
David S. Miller0a625fd2010-05-19 14:14:04 +1000325
David S. Millerc9aa55e2010-05-22 01:09:04 -0700326 return crypto_ahash_update(&rctx->fallback_req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000327}
328
329static int n2_hash_async_final(struct ahash_request *req)
330{
David S. Millerc9aa55e2010-05-22 01:09:04 -0700331 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000332 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
333 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
334
David S. Millerc9aa55e2010-05-22 01:09:04 -0700335 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
336 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
337 rctx->fallback_req.result = req->result;
David S. Miller0a625fd2010-05-19 14:14:04 +1000338
David S. Millerc9aa55e2010-05-22 01:09:04 -0700339 return crypto_ahash_final(&rctx->fallback_req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000340}
341
342static int n2_hash_async_finup(struct ahash_request *req)
343{
David S. Millerc9aa55e2010-05-22 01:09:04 -0700344 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000345 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
346 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
347
David S. Millerc9aa55e2010-05-22 01:09:04 -0700348 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
349 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
350 rctx->fallback_req.nbytes = req->nbytes;
351 rctx->fallback_req.src = req->src;
352 rctx->fallback_req.result = req->result;
David S. Miller0a625fd2010-05-19 14:14:04 +1000353
David S. Millerc9aa55e2010-05-22 01:09:04 -0700354 return crypto_ahash_finup(&rctx->fallback_req);
David S. Miller0a625fd2010-05-19 14:14:04 +1000355}
356
357static int n2_hash_cra_init(struct crypto_tfm *tfm)
358{
Marek Vasut5837af02014-05-14 11:41:01 +0200359 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
David S. Miller0a625fd2010-05-19 14:14:04 +1000360 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
361 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
362 struct crypto_ahash *fallback_tfm;
363 int err;
364
365 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
366 CRYPTO_ALG_NEED_FALLBACK);
367 if (IS_ERR(fallback_tfm)) {
368 pr_warning("Fallback driver '%s' could not be loaded!\n",
369 fallback_driver_name);
370 err = PTR_ERR(fallback_tfm);
371 goto out;
372 }
373
David S. Millerc9aa55e2010-05-22 01:09:04 -0700374 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
375 crypto_ahash_reqsize(fallback_tfm)));
376
377 ctx->fallback_tfm = fallback_tfm;
David S. Miller0a625fd2010-05-19 14:14:04 +1000378 return 0;
379
380out:
381 return err;
382}
383
/* Transform destructor: release the software fallback. */
static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}
391
David S. Millerdc4ccfd2010-05-22 22:53:09 -0700392static int n2_hmac_cra_init(struct crypto_tfm *tfm)
393{
Marek Vasut5837af02014-05-14 11:41:01 +0200394 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
David S. Millerdc4ccfd2010-05-22 22:53:09 -0700395 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
396 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
397 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
398 struct crypto_ahash *fallback_tfm;
399 struct crypto_shash *child_shash;
400 int err;
401
402 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
403 CRYPTO_ALG_NEED_FALLBACK);
404 if (IS_ERR(fallback_tfm)) {
405 pr_warning("Fallback driver '%s' could not be loaded!\n",
406 fallback_driver_name);
407 err = PTR_ERR(fallback_tfm);
408 goto out;
409 }
410
411 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
412 if (IS_ERR(child_shash)) {
413 pr_warning("Child shash '%s' could not be loaded!\n",
414 n2alg->child_alg);
415 err = PTR_ERR(child_shash);
416 goto out_free_fallback;
417 }
418
419 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
420 crypto_ahash_reqsize(fallback_tfm)));
421
422 ctx->child_shash = child_shash;
423 ctx->base.fallback_tfm = fallback_tfm;
424 return 0;
425
426out_free_fallback:
427 crypto_free_ahash(fallback_tfm);
428
429out:
430 return err;
431}
432
/* HMAC transform destructor: release the fallback ahash and the
 * child shash.
 */
static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}
441
/* Install an HMAC key: hand it to the fallback tfm unchanged, then
 * keep a copy for the hardware, first compressing keys longer than
 * one block down to their digest with the child hash (RFC 2104).
 */
static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	SHASH_DESC_ON_STACK(shash, child_shash);
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	shash->tfm = child_shash;
	shash->flags = crypto_ahash_get_flags(tfm) &
		CRYPTO_TFM_REQ_MAY_SLEEP;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		/* Over-long key: replace it with its digest. */
		err = crypto_shash_digest(shash, key, keylen,
					  ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);
	/* NOTE(review): keys with N2_HASH_KEY_MAX < keylen <= bs are not
	 * copied here; hash_key_len still records the length, and the
	 * digest path punts such requests to the software fallback
	 * (see the hash_key_len check in n2_hmac_async_digest).
	 */

	ctx->hash_key_len = keylen;

	return err;
}
476
David S. Miller0a625fd2010-05-19 14:14:04 +1000477static unsigned long wait_for_tail(struct spu_queue *qp)
478{
479 unsigned long head, hv_ret;
480
481 do {
482 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
483 if (hv_ret != HV_EOK) {
484 pr_err("Hypervisor error on gethead\n");
485 break;
486 }
487 if (head == qp->tail) {
488 qp->head = head;
489 break;
490 }
491 } while (1);
492 return hv_ret;
493}
494
495static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
496 struct cwq_initial_entry *ent)
497{
498 unsigned long hv_ret = spu_queue_submit(qp, ent);
499
500 if (hv_ret == HV_EOK)
501 hv_ret = wait_for_tail(qp);
502
503 return hv_ret;
504}
505
/* Submit one hash operation to this cpu's CWQ and busy-wait for it.
 * @hash_loc holds the initial hash state on entry and receives the
 * final state; on success @result_size bytes of it are copied to
 * req->result.  @auth_key/@auth_key_len are 0 for plain hashes and
 * describe the HMAC key otherwise.
 */
static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		/* Too large for the hardware; run on the fallback. */
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	/* Pin ourselves to this cpu so we submit on its queue. */
	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	/* Initial descriptor carries control word, key and IV state. */
	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		/* Continuation descriptors carry only length + source. */
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}
594
David S. Miller3a2c0342010-05-22 02:45:56 -0700595static int n2_hash_async_digest(struct ahash_request *req)
David S. Miller0a625fd2010-05-19 14:14:04 +1000596{
David S. Miller3a2c0342010-05-22 02:45:56 -0700597 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
David S. Millerc9aa55e2010-05-22 01:09:04 -0700598 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
David S. Miller3a2c0342010-05-22 02:45:56 -0700599 int ds;
David S. Miller0a625fd2010-05-19 14:14:04 +1000600
David S. Miller3a2c0342010-05-22 02:45:56 -0700601 ds = n2alg->digest_size;
David S. Miller0a625fd2010-05-19 14:14:04 +1000602 if (unlikely(req->nbytes == 0)) {
David S. Miller3a2c0342010-05-22 02:45:56 -0700603 memcpy(req->result, n2alg->hash_zero, ds);
David S. Miller0a625fd2010-05-19 14:14:04 +1000604 return 0;
605 }
David S. Miller3a2c0342010-05-22 02:45:56 -0700606 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
David S. Miller0a625fd2010-05-19 14:14:04 +1000607
David S. Miller3a2c0342010-05-22 02:45:56 -0700608 return n2_do_async_digest(req, n2alg->auth_type,
609 n2alg->hw_op_hashsz, ds,
David S. Millerdc4ccfd2010-05-22 22:53:09 -0700610 &rctx->u, 0UL, 0);
611}
612
613static int n2_hmac_async_digest(struct ahash_request *req)
614{
615 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
616 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
617 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
618 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
619 int ds;
620
621 ds = n2alg->derived.digest_size;
622 if (unlikely(req->nbytes == 0) ||
623 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
624 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
625 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
626
627 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
628 rctx->fallback_req.base.flags =
629 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
630 rctx->fallback_req.nbytes = req->nbytes;
631 rctx->fallback_req.src = req->src;
632 rctx->fallback_req.result = req->result;
633
634 return crypto_ahash_digest(&rctx->fallback_req);
635 }
636 memcpy(&rctx->u, n2alg->derived.hash_init,
637 n2alg->derived.hw_op_hashsz);
638
639 return n2_do_async_digest(req, n2alg->derived.hmac_type,
640 n2alg->derived.hw_op_hashsz, ds,
641 &rctx->u,
642 __pa(&ctx->hash_key),
643 ctx->hash_key_len);
David S. Miller0a625fd2010-05-19 14:14:04 +1000644}
645
/* Cipher transform context: key material plus the ENC_TYPE_*
 * control-word bits derived from it at setkey time.
 */
struct n2_cipher_context {
	int key_len;
	int enc_type;
	union {
		u8 aes[AES_MAX_KEY_SIZE];
		u8 des[DES_KEY_SIZE];
		u8 des3[3 * DES_KEY_SIZE];
		u8 arc4[258]; /* S-box, X, Y */
	} key;
};
656
#define N2_CHUNK_ARR_LEN	16

/* One hardware submission: up to N2_CHUNK_ARR_LEN source segments
 * written to one contiguous destination.  Physical addresses are
 * packed into 44-bit fields and lengths into 20-bit fields.
 */
struct n2_crypto_chunk {
	struct list_head entry;
	unsigned long iv_paddr : 44;
	unsigned long arr_len : 20;
	unsigned long dest_paddr;
	unsigned long dest_final;	/* one past the last destination byte */
	struct {
		unsigned long src_paddr : 44;
		unsigned long src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};
670
/* Per-request cipher state. */
struct n2_request_context {
	struct ablkcipher_walk walk;
	struct list_head chunk_list;
	struct n2_crypto_chunk chunk;	/* first chunk, embedded to avoid
					 * one allocation per request */
	u8 temp_iv[16];			/* saved next-IV for CBC decrypt */
};
677
/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */
696
/* Driver wrapper around a crypto_alg carrying the hardware
 * ENC_TYPE_* bits for this cipher.
 */
struct n2_cipher_alg {
	struct list_head entry;
	u8 enc_type;
	struct crypto_alg alg;
};
702
703static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
704{
705 struct crypto_alg *alg = tfm->__crt_alg;
706
707 return container_of(alg, struct n2_cipher_alg, alg);
708}
709
/* Per-request walk state for cipher operations. */
struct n2_cipher_request_context {
	struct ablkcipher_walk walk;
};
713
714static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
715 unsigned int keylen)
716{
717 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
718 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
719 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
720
721 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
722
723 switch (keylen) {
724 case AES_KEYSIZE_128:
725 ctx->enc_type |= ENC_TYPE_ALG_AES128;
726 break;
727 case AES_KEYSIZE_192:
728 ctx->enc_type |= ENC_TYPE_ALG_AES192;
729 break;
730 case AES_KEYSIZE_256:
731 ctx->enc_type |= ENC_TYPE_ALG_AES256;
732 break;
733 default:
734 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
735 return -EINVAL;
736 }
737
738 ctx->key_len = keylen;
739 memcpy(ctx->key.aes, key, keylen);
740 return 0;
741}
742
743static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
744 unsigned int keylen)
745{
746 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
747 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
748 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
749 u32 tmp[DES_EXPKEY_WORDS];
750 int err;
751
752 ctx->enc_type = n2alg->enc_type;
753
754 if (keylen != DES_KEY_SIZE) {
755 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
756 return -EINVAL;
757 }
758
759 err = des_ekey(tmp, key);
760 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
761 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
762 return -EINVAL;
763 }
764
765 ctx->key_len = keylen;
766 memcpy(ctx->key.des, key, keylen);
767 return 0;
768}
769
770static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
771 unsigned int keylen)
772{
773 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
774 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
775 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
776
777 ctx->enc_type = n2alg->enc_type;
778
779 if (keylen != (3 * DES_KEY_SIZE)) {
780 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
781 return -EINVAL;
782 }
783 ctx->key_len = keylen;
784 memcpy(ctx->key.des3, key, keylen);
785 return 0;
786}
787
788static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
789 unsigned int keylen)
790{
791 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
792 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
793 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
794 u8 *s = ctx->key.arc4;
795 u8 *x = s + 256;
796 u8 *y = x + 1;
797 int i, j, k;
798
799 ctx->enc_type = n2alg->enc_type;
800
801 j = k = 0;
802 *x = 0;
803 *y = 0;
804 for (i = 0; i < 256; i++)
805 s[i] = i;
806 for (i = 0; i < 256; i++) {
807 u8 a = s[i];
808 j = (j + key[k] + a) & 0xff;
809 s[i] = s[j];
810 s[j] = a;
811 if (++k >= keylen)
812 k = 0;
813 }
814
815 return 0;
816}
817
/* Largest descriptor length for @nbytes: a whole number of cipher
 * blocks, clamped to the hardware's 2^16 byte per-descriptor limit.
 */
static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int whole_blocks = nbytes - (nbytes & (block_size - 1));

	if (whole_blocks > (1 << 16))
		whole_blocks = 1 << 16;
	return whole_blocks;
}
825
/* Encode one n2_crypto_chunk as a run of CWQ descriptors and submit
 * it.  The first descriptor carries the full control word plus key
 * and IV addresses; continuation descriptors carry only a length and
 * source address.  Returns -EBUSY when the ring lacks room for the
 * whole chunk, -EINVAL on hypervisor submission failure, 0 on
 * success.  Caller holds qp->lock.
 */
static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	/* Reserve ring space for all of the chunk's descriptors. */
	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	/* In-place work is signalled via the opcode; the destination
	 * address is then left clear.
	 */
	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}
872
/* Walk the request's scatterlists and partition the work into
 * n2_crypto_chunk submissions on rctx->chunk_list.  A chunk is
 * closed and a new one started whenever in-place-ness changes, the
 * destination becomes discontiguous, the descriptor array fills, or
 * the 2^16-byte total length limit would be exceeded.  The first
 * chunk is embedded in the request context; overflow chunks are
 * GFP_ATOMIC allocations released by n2_chunk_complete().
 */
static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			/* Close the current chunk if this segment
			 * cannot be appended to it.
			 */
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			/* First segment of a chunk fixes its destination. */
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		/* Queue the final, partially filled chunk. */
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
953
954static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
955{
956 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
957 struct n2_crypto_chunk *c, *tmp;
958
959 if (final_iv)
960 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
961
962 ablkcipher_walk_complete(&rctx->walk);
963 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
964 list_del(&c->entry);
965 if (unlikely(c != &rctx->chunk))
966 kfree(c);
967 }
968
969}
970
/* Run an ECB request: build the chunk list, submit every chunk on
 * this cpu's CWQ under a single lock hold (ECB needs no IV plumbing
 * between chunks), then spin until the queue drains.
 */
static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		/* Submitted: unlink and free unless it is the chunk
		 * embedded in the request context.
		 */
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	/* Also frees any chunks left over after an error. */
	n2_chunk_complete(req, NULL);
	return err;
}
1012
/* ablkcipher .encrypt entry point for the ECB templates. */
static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

/* ablkcipher .decrypt entry point for the ECB templates. */
static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}
1022
/* Submit all chunks of a chained-mode (CBC/CFB/CTR) request, threading
 * the IV from one chunk to the next, then wait for the queue to drain.
 *
 * Encryption walks the chunk list forward: each chunk's IV is the last
 * output block of the previous chunk.  Decryption walks the list in
 * reverse: each chunk's IV is the last *ciphertext* block of the chunk
 * preceding it in the message, and the very last ciphertext block is
 * snapshotted into rctx->temp_iv before in-place decryption can
 * overwrite it, so it can become the caller's next IV.
 */
static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			/* Next chunk chains off the last output block
			 * this chunk produced. */
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				/* First chunk of the message uses the
				 * caller-supplied IV. */
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				/* Save the final ciphertext block before
				 * an in-place decrypt destroys it. */
				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}
1102
/* ablkcipher .encrypt entry point for the chained-mode templates.
 * Note the CTR template uses this for .decrypt as well, since CTR
 * encryption and decryption are the same operation. */
static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

/* ablkcipher .decrypt entry point for CBC/CFB templates. */
static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}
1112
/* Template describing one cipher algorithm the SPU can offload;
 * expanded into a live crypto_alg by __n2_register_one_cipher(). */
struct n2_cipher_tmpl {
	const char		*name;		/* crypto API algorithm name */
	const char		*drv_name;	/* stem of cra_driver_name */
	u8			block_size;	/* cipher block size in bytes */
	u8			enc_type;	/* SPU ENC_TYPE_* control bits */
	struct ablkcipher_alg	ablkcipher;	/* key sizes and entry points */
};
1120
/* One template per cipher/chaining-mode combination supported by the
 * hardware.  Registered at probe time by n2_register_algs(). */
static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{ .name		= "ecb(arc4)",
	  .drv_name	= "ecb-arc4",
	  .block_size	= 1,
	  .enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 1,
		  .max_keysize	= 256,
		  .setkey	= n2_arc4_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},

	/* DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des)",
	  .drv_name	= "ecb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des)",
	  .drv_name	= "cbc-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des)",
	  .drv_name	= "cfb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},

	/* 3DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des3_ede)",
	  .drv_name	= "ecb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des3_ede)",
	  .drv_name	= "cbc-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des3_ede)",
	  .drv_name	= "cfb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	/* AES: ECB CBC and CTR are supported */
	{ .name		= "ecb(aes)",
	  .drv_name	= "ecb-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(aes)",
	  .drv_name	= "cbc-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "ctr(aes)",
	  .drv_name	= "ctr-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_COUNTER),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  /* CTR decrypt == encrypt; intentional. */
		  .decrypt	= n2_encrypt_chaining,
	  },
	},

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

/* Live cipher algorithm instances currently registered. */
static LIST_HEAD(cipher_algs);
1267
/* Template describing one hash algorithm; expanded into an ahash (and
 * possibly a derived HMAC) by __n2_register_one_ahash(). */
struct n2_hash_tmpl {
	const char	*name;		/* crypto API algorithm name */
	const char	*hash_zero;	/* precomputed digest of "" */
	const u32	*hash_init;	/* initial hash state words */
	u8		hw_op_hashsz;	/* state size the hardware produces */
	u8		digest_size;	/* digest bytes exposed to callers */
	u8		block_size;	/* algorithm block size in bytes */
	u8		auth_type;	/* SPU AUTH_TYPE_* code */
	u8		hmac_type;	/* HMAC AUTH_TYPE_*, or RESERVED if none */
};
David S. Miller3a2c0342010-05-22 02:45:56 -07001278
/* Precomputed digests of the empty message and initial state vectors
 * for each supported hash.  The *_zero tables are presumably returned
 * directly for zero-length requests (consumer is outside this chunk —
 * NOTE(review): confirm against n2_hash_async_digest). */
static const char md5_zero[MD5_DIGEST_SIZE] = {
	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
/* MD5 state words are little-endian per the algorithm definition. */
static const u32 md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(0x67452301),
	cpu_to_le32(0xefcdab89),
	cpu_to_le32(0x98badcfe),
	cpu_to_le32(0x10325476),
};
static const char sha1_zero[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
	0x07, 0x09
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const char sha256_zero[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
	0x1b, 0x78, 0x52, 0xb8, 0x55
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const char sha224_zero[SHA224_DIGEST_SIZE] = {
	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
	0x2f
};
/* SHA-224 runs on the SHA-256 engine, so the init state is full
 * SHA-256 sized with the SHA-224 constants. */
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};
1317
/* One template per supported hash.  sha224 reuses the SHA256 engine
 * and has no hardware HMAC (hmac_type = AUTH_TYPE_RESERVED). */
static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero,
	  .hash_init	= md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero,
	  .hash_init	= sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero,
	  .hash_init	= sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero,
	  .hash_init	= sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

/* Live hash and HMAC algorithm instances currently registered. */
static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

/* Registration refcount, guarded by spu_lock. */
static int algs_registered;
1358
1359static void __n2_unregister_algs(void)
1360{
1361 struct n2_cipher_alg *cipher, *cipher_tmp;
1362 struct n2_ahash_alg *alg, *alg_tmp;
David S. Millerdc4ccfd2010-05-22 22:53:09 -07001363 struct n2_hmac_alg *hmac, *hmac_tmp;
David S. Miller0a625fd2010-05-19 14:14:04 +10001364
1365 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1366 crypto_unregister_alg(&cipher->alg);
1367 list_del(&cipher->entry);
1368 kfree(cipher);
1369 }
David S. Millerdc4ccfd2010-05-22 22:53:09 -07001370 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1371 crypto_unregister_ahash(&hmac->derived.alg);
1372 list_del(&hmac->derived.entry);
1373 kfree(hmac);
1374 }
David S. Miller0a625fd2010-05-19 14:14:04 +10001375 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1376 crypto_unregister_ahash(&alg->alg);
1377 list_del(&alg->entry);
1378 kfree(alg);
1379 }
1380}
1381
/* Per-tfm init: reserve room for our per-request context inside each
 * ablkcipher request. */
static int n2_cipher_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
	return 0;
}
1387
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001388static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
David S. Miller0a625fd2010-05-19 14:14:04 +10001389{
1390 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1391 struct crypto_alg *alg;
1392 int err;
1393
1394 if (!p)
1395 return -ENOMEM;
1396
1397 alg = &p->alg;
1398
1399 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1400 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1401 alg->cra_priority = N2_CRA_PRIORITY;
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01001402 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1403 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
David S. Miller0a625fd2010-05-19 14:14:04 +10001404 alg->cra_blocksize = tmpl->block_size;
1405 p->enc_type = tmpl->enc_type;
1406 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1407 alg->cra_type = &crypto_ablkcipher_type;
1408 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1409 alg->cra_init = n2_cipher_cra_init;
1410 alg->cra_module = THIS_MODULE;
1411
1412 list_add(&p->entry, &cipher_algs);
1413 err = crypto_register_alg(alg);
1414 if (err) {
David S. Miller38511102010-05-19 23:16:05 -07001415 pr_err("%s alg registration failed\n", alg->cra_name);
David S. Miller0a625fd2010-05-19 14:14:04 +10001416 list_del(&p->entry);
1417 kfree(p);
David S. Miller38511102010-05-19 23:16:05 -07001418 } else {
1419 pr_info("%s alg registered\n", alg->cra_name);
David S. Miller0a625fd2010-05-19 14:14:04 +10001420 }
1421 return err;
1422}
1423
/* Derive and register an "hmac(<hash>)" ahash from an already
 * registered base hash.  The derived instance is a copy of the base
 * with the digest/setkey entry points replaced by the HMAC variants.
 */
static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	/* Start from a copy of the base ahash, then override. */
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}
1461
/* Instantiate one hash template as an ahash and register it.  If the
 * template has a hardware HMAC type, the corresponding hmac(<name>)
 * algorithm is registered as well.
 */
static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	/* NEED_FALLBACK: a software fallback tfm is used for cases the
	 * hardware cannot handle (allocated in n2_hash_cra_init). */
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}
1516
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001517static int n2_register_algs(void)
David S. Miller0a625fd2010-05-19 14:14:04 +10001518{
1519 int i, err = 0;
1520
1521 mutex_lock(&spu_lock);
1522 if (algs_registered++)
1523 goto out;
1524
1525 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1526 err = __n2_register_one_ahash(&hash_tmpls[i]);
1527 if (err) {
1528 __n2_unregister_algs();
1529 goto out;
1530 }
1531 }
1532 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1533 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1534 if (err) {
1535 __n2_unregister_algs();
1536 goto out;
1537 }
1538 }
1539
1540out:
1541 mutex_unlock(&spu_lock);
1542 return err;
1543}
1544
/* Drop one registration reference; the last dropper unregisters all
 * algorithms. */
static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}
1552
1553/* To map CWQ queues to interrupt sources, the hypervisor API provides
1554 * a devino. This isn't very useful to us because all of the
Grant Likely2dc11582010-08-06 09:25:50 -06001555 * interrupts listed in the device_node have been translated to
David S. Miller0a625fd2010-05-19 14:14:04 +10001556 * Linux virtual IRQ cookie numbers.
1557 *
1558 * So we have to back-translate, going through the 'intr' and 'ino'
1559 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
1561 * devino goes to which already-translated IRQ.
1562 */
Grant Likely2dc11582010-08-06 09:25:50 -06001563static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
David S. Miller0a625fd2010-05-19 14:14:04 +10001564 unsigned long dev_ino)
1565{
1566 const unsigned int *dev_intrs;
1567 unsigned int intr;
1568 int i;
1569
1570 for (i = 0; i < ip->num_intrs; i++) {
1571 if (ip->ino_table[i].ino == dev_ino)
1572 break;
1573 }
1574 if (i == ip->num_intrs)
1575 return -ENODEV;
1576
1577 intr = ip->ino_table[i].intr;
1578
David S. Millerff6c7342010-05-25 17:37:08 -07001579 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
David S. Miller0a625fd2010-05-19 14:14:04 +10001580 if (!dev_intrs)
1581 return -ENODEV;
1582
Grant Likely19e48752010-08-08 00:23:26 -06001583 for (i = 0; i < dev->archdata.num_irqs; i++) {
David S. Miller0a625fd2010-05-19 14:14:04 +10001584 if (dev_intrs[i] == intr)
1585 return i;
1586 }
1587
1588 return -ENODEV;
1589}
1590
Grant Likely2dc11582010-08-06 09:25:50 -06001591static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
David S. Miller0a625fd2010-05-19 14:14:04 +10001592 const char *irq_name, struct spu_queue *p,
1593 irq_handler_t handler)
1594{
1595 unsigned long herr;
1596 int index;
1597
1598 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1599 if (herr)
1600 return -EINVAL;
1601
1602 index = find_devino_index(dev, ip, p->devino);
1603 if (index < 0)
1604 return index;
1605
Grant Likely19e48752010-08-08 00:23:26 -06001606 p->irq = dev->archdata.irqs[index];
David S. Miller0a625fd2010-05-19 14:14:04 +10001607
1608 sprintf(p->irq_name, "%s-%d", irq_name, index);
1609
Theodore Ts'o9751bfd2012-07-17 13:42:34 -04001610 return request_irq(p->irq, handler, 0, p->irq_name, p);
David S. Miller0a625fd2010-05-19 14:14:04 +10001611}
1612
/* Slab caches backing the MAU and CWQ hardware queues, indexed by
 * HV_NCS_QTYPE_* minus one. */
static struct kmem_cache *queue_cache[2];

/* Allocate one zeroed queue buffer of the given type. */
static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}
1619
1620static void free_queue(void *p, unsigned long q_type)
1621{
1622 return kmem_cache_free(queue_cache[q_type - 1], p);
1623}
1624
1625static int queue_cache_init(void)
1626{
1627 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1628 queue_cache[HV_NCS_QTYPE_MAU - 1] =
David S. Miller527b9522010-05-22 00:50:12 -07001629 kmem_cache_create("mau_queue",
David S. Miller0a625fd2010-05-19 14:14:04 +10001630 (MAU_NUM_ENTRIES *
1631 MAU_ENTRY_SIZE),
1632 MAU_ENTRY_SIZE, 0, NULL);
1633 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1634 return -ENOMEM;
1635
1636 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1637 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1638 kmem_cache_create("cwq_queue",
1639 (CWQ_NUM_ENTRIES *
1640 CWQ_ENTRY_SIZE),
1641 CWQ_ENTRY_SIZE, 0, NULL);
1642 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1643 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1644 return -ENOMEM;
1645 }
1646 return 0;
1647}
1648
1649static void queue_cache_destroy(void)
1650{
1651 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1652 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1653}
1654
/* Configure the queue with the hypervisor.  The thread is temporarily
 * bound to p->sharing — apparently the qconf hypercall must execute on
 * one of the CPUs that share this SPU (NOTE(review): confirm against
 * the sun4v NCS API) — and the original affinity is restored after.
 *
 * Returns 0 on success, -EINVAL on hypervisor failure or an empty
 * sharing mask, -ENOMEM if the cpumask copy cannot be allocated.
 */
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	cpumask_var_t old_allowed;
	unsigned long hv_ret;

	if (cpumask_empty(&p->sharing))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(old_allowed, &current->cpus_allowed);

	set_cpus_allowed_ptr(current, &p->sharing);

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	set_cpus_allowed_ptr(current, old_allowed);

	free_cpumask_var(old_allowed);

	return (hv_ret ? -EINVAL : 0);
}
1681
1682static int spu_queue_setup(struct spu_queue *p)
1683{
1684 int err;
1685
1686 p->q = new_queue(p->q_type);
1687 if (!p->q)
1688 return -ENOMEM;
1689
1690 err = spu_queue_register(p, p->q_type);
1691 if (err) {
1692 free_queue(p->q, p->q_type);
1693 p->q = NULL;
1694 }
1695
1696 return err;
1697}
1698
/* Unconfigure a queue from the hypervisor (a qconf with 0 entries
 * tears it down) and free its memory.  No-op if the queue was never
 * set up.  The memory is intentionally kept if the hypervisor call
 * fails, since the hardware may still reference it. */
static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}
1711
1712static void spu_list_destroy(struct list_head *list)
1713{
1714 struct spu_queue *p, *n;
1715
1716 list_for_each_entry_safe(p, n, list, list) {
1717 int i;
1718
1719 for (i = 0; i < NR_CPUS; i++) {
1720 if (cpu_to_cwq[i] == p)
1721 cpu_to_cwq[i] = NULL;
1722 }
1723
1724 if (p->irq) {
1725 free_irq(p->irq, p);
1726 p->irq = 0;
1727 }
1728 spu_queue_destroy(p);
1729 list_del(&p->list);
1730 kfree(p);
1731 }
1732}
1733
1734/* Walk the backward arcs of a CWQ 'exec-unit' node,
1735 * gathering cpu membership information.
1736 */
1737static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
Grant Likely2dc11582010-08-06 09:25:50 -06001738 struct platform_device *dev,
David S. Miller0a625fd2010-05-19 14:14:04 +10001739 u64 node, struct spu_queue *p,
1740 struct spu_queue **table)
1741{
1742 u64 arc;
1743
1744 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1745 u64 tgt = mdesc_arc_target(mdesc, arc);
1746 const char *name = mdesc_node_name(mdesc, tgt);
1747 const u64 *id;
1748
1749 if (strcmp(name, "cpu"))
1750 continue;
1751 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1752 if (table[*id] != NULL) {
1753 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
David S. Millerff6c7342010-05-25 17:37:08 -07001754 dev->dev.of_node->full_name);
David S. Miller0a625fd2010-05-19 14:14:04 +10001755 return -EINVAL;
1756 }
1757 cpu_set(*id, p->sharing);
1758 table[*id] = p;
1759 }
1760 return 0;
1761}
1762
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpus_clear(p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	/* Linked onto 'list' before full setup so the caller's
	 * spu_list_destroy() reclaims it on any later failure. */
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}
1795
Grant Likely2dc11582010-08-06 09:25:50 -06001796static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
David S. Miller0a625fd2010-05-19 14:14:04 +10001797 struct spu_mdesc_info *ip, struct list_head *list,
1798 const char *exec_name, unsigned long q_type,
1799 irq_handler_t handler, struct spu_queue **table)
1800{
1801 int err = 0;
1802 u64 node;
1803
1804 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1805 const char *type;
1806
1807 type = mdesc_get_property(mdesc, node, "type", NULL);
1808 if (!type || strcmp(type, exec_name))
1809 continue;
1810
1811 err = handle_exec_unit(ip, list, dev, mdesc, node,
1812 exec_name, q_type, handler, table);
1813 if (err) {
1814 spu_list_destroy(list);
1815 break;
1816 }
1817 }
1818
1819 return err;
1820}
1821
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001822static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1823 struct spu_mdesc_info *ip)
David S. Miller0a625fd2010-05-19 14:14:04 +10001824{
David S. Millereb7caf32011-07-28 01:30:07 -07001825 const u64 *ino;
1826 int ino_len;
David S. Miller0a625fd2010-05-19 14:14:04 +10001827 int i;
1828
David S. Miller0a625fd2010-05-19 14:14:04 +10001829 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
David S. Millereb7caf32011-07-28 01:30:07 -07001830 if (!ino) {
1831 printk("NO 'ino'\n");
David S. Miller0a625fd2010-05-19 14:14:04 +10001832 return -ENODEV;
David S. Millereb7caf32011-07-28 01:30:07 -07001833 }
David S. Miller0a625fd2010-05-19 14:14:04 +10001834
David S. Millereb7caf32011-07-28 01:30:07 -07001835 ip->num_intrs = ino_len / sizeof(u64);
David S. Miller0a625fd2010-05-19 14:14:04 +10001836 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1837 ip->num_intrs),
1838 GFP_KERNEL);
1839 if (!ip->ino_table)
1840 return -ENOMEM;
1841
1842 for (i = 0; i < ip->num_intrs; i++) {
1843 struct ino_blob *b = &ip->ino_table[i];
David S. Millereb7caf32011-07-28 01:30:07 -07001844 b->intr = i + 1;
David S. Miller0a625fd2010-05-19 14:14:04 +10001845 b->ino = ino[i];
1846 }
1847
1848 return 0;
1849}
1850
/* Locate this device's 'virtual-device' node in the MDESC by matching
 * the node name and the OF 'reg' value against 'cfg-handle', then pull
 * its interrupt properties into *ip.
 *
 * Returns 0 on success or -ENODEV if no matching node is found.
 */
static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}
1879
/* Negotiated NCS hypervisor API version (requested 2.x below). */
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

/* Register with the NCS hypervisor API group, requesting major
 * version 2.  Returns 0 on success. */
static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}
1901
/* Release our registration with the NCS hypervisor API group. */
static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

/* Refcount for the shared global resources below, guarded by spu_lock. */
static int global_ref;
1908
/* Acquire the driver-wide shared resources (HV API registration, queue
 * slab caches, per-cpu queue tables), refcounted so only the first
 * caller does the work.  On failure, partially-acquired resources are
 * rolled back via the labels below and the reference is dropped again
 * at 'out'.
 */
static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	/* Someone else already set everything up. */
	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	/* Error paths jump back here after cleanup; undo the refcount
	 * bump taken above. */
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}
1956
1957static void release_global_resources(void)
1958{
1959 mutex_lock(&spu_lock);
1960 if (!--global_ref) {
1961 kfree(cpu_to_cwq);
1962 cpu_to_cwq = NULL;
1963
1964 kfree(cpu_to_mau);
1965 cpu_to_mau = NULL;
1966
1967 queue_cache_destroy();
1968 n2_spu_hvapi_unregister();
1969 }
1970 mutex_unlock(&spu_lock);
1971}
1972
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001973static struct n2_crypto *alloc_n2cp(void)
David S. Miller0a625fd2010-05-19 14:14:04 +10001974{
1975 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1976
1977 if (np)
1978 INIT_LIST_HEAD(&np->cwq_list);
1979
1980 return np;
1981}
1982
1983static void free_n2cp(struct n2_crypto *np)
1984{
1985 if (np->cwq_info.ino_table) {
1986 kfree(np->cwq_info.ino_table);
1987 np->cwq_info.ino_table = NULL;
1988 }
1989
1990 kfree(np);
1991}
1992
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001993static void n2_spu_driver_version(void)
David S. Miller0a625fd2010-05-19 14:14:04 +10001994{
1995 static int n2_spu_version_printed;
1996
1997 if (n2_spu_version_printed++ == 0)
1998 pr_info("%s", version);
1999}
2000
/* Probe one "n2cp" platform device: allocate driver state, take a
 * reference on the global SPU resources, read IRQ properties and the
 * CWQ layout from the machine description (MDESC), then register the
 * crypto algorithms with the crypto API.
 *
 * Returns 0 on success or a negative errno; each failure path unwinds
 * exactly what was acquired before it (goto-cleanup in reverse order).
 */
static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	/* Refcounted: shared with any other n2cp/ncp devices. */
	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	/* The MDESC handle is only needed during scanning; release it
	 * before inspecting the result so both outcomes drop it once.
	 */
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		/* NOTE(review): if spu_mdesc_scan() can fail after
		 * partially populating cwq_list, those entries are not
		 * destroyed here — confirm it cleans up on error.
		 */
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}
2076
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002077static int n2_crypto_remove(struct platform_device *dev)
David S. Miller0a625fd2010-05-19 14:14:04 +10002078{
2079 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2080
2081 n2_unregister_algs();
2082
2083 spu_list_destroy(&np->cwq_list);
2084
2085 release_global_resources();
2086
2087 free_n2cp(np);
2088
2089 return 0;
2090}
2091
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002092static struct n2_mau *alloc_ncp(void)
David S. Miller0a625fd2010-05-19 14:14:04 +10002093{
2094 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2095
2096 if (mp)
2097 INIT_LIST_HEAD(&mp->mau_list);
2098
2099 return mp;
2100}
2101
2102static void free_ncp(struct n2_mau *mp)
2103{
2104 if (mp->mau_info.ino_table) {
2105 kfree(mp->mau_info.ino_table);
2106 mp->mau_info.ino_table = NULL;
2107 }
2108
2109 kfree(mp);
2110}
2111
/* Probe one "ncp" platform device (the Modular Arithmetic Unit):
 * allocate driver state, take a reference on the global SPU resources,
 * read IRQ properties and the MAU layout from the machine description
 * (MDESC).  Unlike n2_crypto_probe(), no crypto algorithms are
 * registered here.
 *
 * Returns 0 on success or a negative errno; failure paths unwind what
 * was acquired via goto-cleanup.
 */
static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	/* Refcounted: shared with any other n2cp/ncp devices. */
	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	/* MDESC is only needed during scanning; drop it either way. */
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		/* NOTE(review): if spu_mdesc_scan() can fail after
		 * partially populating mau_list, those entries are not
		 * destroyed here — confirm it cleans up on error.
		 */
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}
2178
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002179static int n2_mau_remove(struct platform_device *dev)
David S. Miller0a625fd2010-05-19 14:14:04 +10002180{
2181 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2182
2183 spu_list_destroy(&mp->mau_list);
2184
2185 release_global_resources();
2186
2187 free_ncp(mp);
2188
2189 return 0;
2190}
2191
2192static struct of_device_id n2_crypto_match[] = {
2193 {
2194 .name = "n2cp",
2195 .compatible = "SUNW,n2-cwq",
2196 },
2197 {
2198 .name = "n2cp",
2199 .compatible = "SUNW,vf-cwq",
2200 },
David S. Millereb7caf32011-07-28 01:30:07 -07002201 {
2202 .name = "n2cp",
2203 .compatible = "SUNW,kt-cwq",
2204 },
David S. Miller0a625fd2010-05-19 14:14:04 +10002205 {},
2206};
2207
2208MODULE_DEVICE_TABLE(of, n2_crypto_match);
2209
/* Platform driver binding for the "n2cp" CWQ crypto device node. */
static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.of_match_table = n2_crypto_match,
	},
	.probe = n2_crypto_probe,
	.remove = n2_crypto_remove,
};
2218
2219static struct of_device_id n2_mau_match[] = {
2220 {
2221 .name = "ncp",
2222 .compatible = "SUNW,n2-mau",
2223 },
2224 {
2225 .name = "ncp",
2226 .compatible = "SUNW,vf-mau",
2227 },
David S. Millereb7caf32011-07-28 01:30:07 -07002228 {
2229 .name = "ncp",
2230 .compatible = "SUNW,kt-mau",
2231 },
David S. Miller0a625fd2010-05-19 14:14:04 +10002232 {},
2233};
2234
2235MODULE_DEVICE_TABLE(of, n2_mau_match);
2236
/* Platform driver binding for the "ncp" MAU device node. */
static struct platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.of_match_table = n2_mau_match,
	},
	.probe = n2_mau_probe,
	.remove = n2_mau_remove,
};
2245
2246static int __init n2_init(void)
2247{
Grant Likely4ebb24f2011-02-22 20:01:33 -07002248 int err = platform_driver_register(&n2_crypto_driver);
David S. Miller0a625fd2010-05-19 14:14:04 +10002249
2250 if (!err) {
Grant Likely4ebb24f2011-02-22 20:01:33 -07002251 err = platform_driver_register(&n2_mau_driver);
David S. Miller0a625fd2010-05-19 14:14:04 +10002252 if (err)
Grant Likely4ebb24f2011-02-22 20:01:33 -07002253 platform_driver_unregister(&n2_crypto_driver);
David S. Miller0a625fd2010-05-19 14:14:04 +10002254 }
2255 return err;
2256}
2257
/* Module exit: unregister both drivers in reverse of n2_init(). */
static void __exit n2_exit(void)
{
	platform_driver_unregister(&n2_mau_driver);
	platform_driver_unregister(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);