/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};


static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
	return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
	ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
	ccp_dev = NULL;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	if (ccp_get_device())
		return 0;

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(ccp_present);

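/*
 * Illustrative use (not part of the original file): a consumer module might
 * call ccp_present() up front and bail out when no CCP device has been
 * registered, before submitting any work with ccp_enqueue_cmd().  The
 * function name my_consumer_init() is hypothetical.
 *
 *	static int __init my_consumer_init(void)
 *	{
 *		if (ccp_present() != 0)
 *			return -ENODEV;	// no CCP device registered
 *
 *		// safe to queue commands via ccp_enqueue_cmd() from here on
 *		return 0;
 *	}
 */
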
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the return
 * code is -EBUSY and the cmd is placed on the backlog only if the
 * CCP_CMD_MAY_BACKLOG flag is set.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or of advancement out of the backlog. If the cmd has
 * advanced out of the backlog, the "err" value passed to the
 * callback will be -EINPROGRESS. Any other "err" value during the
 * callback is the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 *
 * An illustrative caller sketch follows this function.
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);

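/*
 * Illustrative caller sketch (not part of the original file): a minimal
 * example of how a consumer might submit a command and interpret the return
 * codes documented above.  Engine-specific setup of the ccp_cmd is elided,
 * and the names my_ctx, my_cmd_done() and my_submit() are hypothetical.
 *
 *	struct my_ctx {
 *		struct completion done;
 *		int result;
 *	};
 *
 *	static void my_cmd_done(void *data, int err)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		if (err == -EINPROGRESS)	// advanced out of the backlog
 *			return;
 *
 *		ctx->result = err;	// 0 on success, or the operation error
 *		complete(&ctx->done);
 *	}
 *
 *	static int my_submit(struct ccp_cmd *cmd, struct my_ctx *ctx)
 *	{
 *		int ret;
 *
 *		init_completion(&ctx->done);
 *		cmd->flags = CCP_CMD_MAY_BACKLOG;
 *		cmd->callback = my_cmd_done;
 *		cmd->data = ctx;
 *		// engine-specific fields of *cmd would be filled in here
 *
 *		ret = ccp_enqueue_cmd(cmd);
 *		if (ret != -EINPROGRESS && ret != -EBUSY)
 *			return ret;	// not queued (e.g. -ENODEV or -EINVAL)
 *
 *		wait_for_completion(&ctx->done);
 *		return ctx->result;
 *	}
 */
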
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/*
	 * Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
	if (ccp == NULL) {
		dev_err(dev, "unable to allocate device struct\n");
		return NULL;
	}
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;
	ccp->ksb_start = 0;

	return ccp;
}

/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "ccp-q%u", cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = "ccp-rng";
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Make the device struct available before enabling interrupts */
	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

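/*
 * Illustrative bus-glue sketch (not part of the original file): roughly how
 * a bus front end (the code behind ccp_pci_init()/ccp_platform_init()) is
 * expected to drive ccp_alloc_struct() and ccp_init() from its probe path.
 * The helpers my_map_registers(), my_get_irq() and my_free_irq() are
 * hypothetical; only the fields actually used by this file are shown.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct ccp_device *ccp;
 *		int ret;
 *
 *		ccp = ccp_alloc_struct(dev);
 *		if (!ccp)
 *			return -ENOMEM;
 *
 *		ccp->io_regs = my_map_registers(dev);	// MMIO base of the CCP
 *		ccp->get_irq = my_get_irq;	// hooks used by ccp_init()/ccp_destroy()
 *		ccp->free_irq = my_free_irq;
 *
 *		dev_set_drvdata(dev, ccp);	// ccp_irq_handler() relies on this
 *
 *		ret = ccp_init(ccp);
 *		if (ret)
 *			kfree(ccp);
 *
 *		return ret;
 *	}
 */
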
/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove general access to the device struct */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

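/*
 * Illustrative teardown sketch (not part of the original file): the bus glue
 * is expected to call ccp_destroy() from its remove path before releasing
 * its own resources.  The names my_remove() and my_unmap_registers() are
 * hypothetical.
 *
 *	static void my_remove(struct device *dev)
 *	{
 *		struct ccp_device *ccp = dev_get_drvdata(dev);
 *
 *		ccp_destroy(ccp);	// stops the kthreads and flushes the queues
 *		my_unmap_registers(dev);
 *		kfree(ccp);
 *	}
 */
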
/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}

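/*
 * Illustrative sketch (not part of the original file): a ->get_irq() hook
 * supplied by the bus glue would typically wire ccp_irq_handler() up with
 * request_irq(), passing the struct device as the cookie so that
 * dev_get_drvdata() above resolves back to the ccp_device.  The names
 * my_get_irq() and my_lookup_irq() are hypothetical.
 *
 *	static int my_get_irq(struct ccp_device *ccp)
 *	{
 *		struct device *dev = ccp->dev;
 *		int irq = my_lookup_irq(dev);	// bus-specific IRQ lookup
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		return request_irq(irq, ccp_irq_handler, 0, "ccp", dev);
 *	}
 */
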
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif

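/*
 * Illustrative suspend sketch (not part of the original file): under
 * CONFIG_PM the bus glue is expected to mark the device as suspending, wake
 * every queue kthread so it can park itself in ccp_dequeue_cmd(), and then
 * wait on suspend_queue until ccp_queues_suspended() reports that all queues
 * have stopped.  The name my_suspend() is hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct ccp_device *ccp = dev_get_drvdata(dev);
 *		unsigned long flags;
 *		unsigned int i;
 *
 *		spin_lock_irqsave(&ccp->cmd_lock, flags);
 *		ccp->suspending = 1;
 *		for (i = 0; i < ccp->cmd_q_count; i++)
 *			wake_up_process(ccp->cmd_q[i].kthread);
 *		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
 *
 *		wait_event_interruptible(ccp->suspend_queue,
 *					 ccp_queues_suspended(ccp));
 *
 *		return 0;
 *	}
 */
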
#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
};
#endif

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
	int ret;

	if (!x86_match_cpu(ccp_support))
		return -ENODEV;

	switch (cpuinfo->x86) {
	case 22:
		if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
			return -ENODEV;

		ret = ccp_pci_init();
		if (ret)
			return ret;

		/* Don't leave the driver loaded if init failed */
		if (!ccp_get_device()) {
			ccp_pci_exit();
			return -ENODEV;
		}

		return 0;

		break;
	}
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (!ccp_get_device()) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

	switch (cpuinfo->x86) {
	case 22:
		ccp_pci_exit();
		break;
	}
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);