/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>
#include <linux/of.h>

#include "ccp-dev.h"

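/*
 * Retrieve the first IRQ resource of the platform device, save it in
 * the ccp_device and register ccp_irq_handler against it.
 */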
static int ccp_get_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct platform_device *pdev = container_of(dev,
						    struct platform_device, dev);
	int ret;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;

	ccp->irq = ret;
	ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
	if (ret) {
		dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
		return ret;
	}

	return 0;
}

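/*
 * Wrapper installed as the ccp_device get_irq callback at probe time;
 * reports a failure to obtain an interrupt via dev_notice.
 */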
static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get an interrupt */
	dev_notice(dev, "could not enable interrupts (%d)\n", ret);

	return ret;
}

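/* Release the IRQ registered in ccp_get_irq(). */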
static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;

	free_irq(ccp->irq, dev);
}

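/*
 * Locate the device's MMIO resource, requiring at least 0x800 bytes
 * of register space.
 */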
static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct platform_device *pdev = container_of(dev,
						    struct platform_device, dev);
	struct resource *ior;

	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (ior && (resource_size(ior) >= 0x800))
		return ior;

	return NULL;
}

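/*
 * Probe: allocate the ccp_device, map the MMIO register area, set a
 * 48-bit DMA mask, derive the AXI cache settings from the "dma-coherent"
 * device tree property, and initialize the device via ccp_init().
 */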
static int ccp_platform_probe(struct platform_device *pdev)
{
	struct ccp_device *ccp;
	struct device *dev = &pdev->dev;
	struct resource *ior;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp->dev_specific = NULL;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ior = ccp_find_mmio_area(ccp);
	ccp->io_map = devm_ioremap_resource(dev, ior);
	if (IS_ERR(ccp->io_map)) {
		ret = PTR_ERR(ccp->io_map);
		goto e_free;
	}
	ccp->io_regs = ccp->io_map;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	*(dev->dma_mask) = DMA_BIT_MASK(48);
	dev->coherent_dma_mask = DMA_BIT_MASK(48);

	if (of_property_read_bool(dev->of_node, "dma-coherent"))
		ccp->axcache = CACHE_WB_NO_ALLOC;
	else
		ccp->axcache = CACHE_NONE;

	dev_set_drvdata(dev, ccp);

	ret = ccp_init(ccp);
	if (ret)
		goto e_free;

	dev_notice(dev, "enabled\n");

	return 0;

e_free:
	kfree(ccp);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

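/* Tear down the device and free the ccp_device allocated at probe time. */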
static int ccp_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	ccp_destroy(ccp);

	kfree(ccp);

	dev_notice(dev, "disabled\n");

	return 0;
}

#ifdef CONFIG_PM
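/*
 * Suspend: set the suspending flag under cmd_lock, wake each command
 * queue kthread so it can observe the flag, then wait on suspend_queue
 * until every queue reports itself suspended.
 */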
static int ccp_platform_suspend(struct platform_device *pdev,
				pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

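/* Resume: clear the suspend flags and restart the queue kthreads. */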
static int ccp_platform_resume(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

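/* OF match table; "amd,ccp-seattle-v1a" is the CCP on AMD Seattle SoCs. */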
static const struct of_device_id ccp_platform_ids[] = {
	{ .compatible = "amd,ccp-seattle-v1a" },
	{ },
};

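/* Platform driver glue tying probe/remove and the optional PM hooks together. */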
static struct platform_driver ccp_platform_driver = {
	.driver = {
		.name = "AMD Cryptographic Coprocessor",
		.owner = THIS_MODULE,
		.of_match_table = ccp_platform_ids,
	},
	.probe = ccp_platform_probe,
	.remove = ccp_platform_remove,
#ifdef CONFIG_PM
	.suspend = ccp_platform_suspend,
	.resume = ccp_platform_resume,
#endif
};

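/* Registration entry point called from the common CCP module init code. */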
int ccp_platform_init(void)
{
	return platform_driver_register(&ccp_platform_driver);
}

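/* Unregister the platform driver on module exit. */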
void ccp_platform_exit(void)
{
	platform_driver_unregister(&ccp_platform_driver);
}