/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};

/* Human-readable error strings */
#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
        "",
        "ILLEGAL_ENGINE",
        "ILLEGAL_KEY_ID",
        "ILLEGAL_FUNCTION_TYPE",
        "ILLEGAL_FUNCTION_MODE",
        "ILLEGAL_FUNCTION_ENCRYPT",
        "ILLEGAL_FUNCTION_SIZE",
        "Zlib_MISSING_INIT_EOM",
        "ILLEGAL_FUNCTION_RSVD",
        "ILLEGAL_BUFFER_LENGTH",
        "VLSB_FAULT",
        "ILLEGAL_MEM_ADDR",
        "ILLEGAL_MEM_SEL",
        "ILLEGAL_CONTEXT_ID",
        "ILLEGAL_KEY_ADDR",
        "0xF Reserved",
        "Zlib_ILLEGAL_MULTI_QUEUE",
        "Zlib_ILLEGAL_JOBID_CHANGE",
        "CMD_TIMEOUT",
        "IDMA0_AXI_SLVERR",
        "IDMA0_AXI_DECERR",
        "0x15 Reserved",
        "IDMA1_AXI_SLAVE_FAULT",
        "IDMA1_AXI_DECERR",
        "0x18 Reserved",
        "ZLIBVHB_AXI_SLVERR",
        "ZLIBVHB_AXI_DECERR",
        "0x1B Reserved",
        "ZLIB_UNEXPECTED_EOM",
        "ZLIB_EXTRA_DATA",
        "ZLIB_BTYPE",
        "ZLIB_UNDEFINED_SYMBOL",
        "ZLIB_UNDEFINED_DISTANCE_S",
        "ZLIB_CODE_LENGTH_SYMBOL",
        "ZLIB_VHB_ILLEGAL_FETCH",
        "ZLIB_UNCOMPRESSED_LEN",
        "ZLIB_LIMIT_REACHED",
        "ZLIB_CHECKSUM_MISMATCH0",
        "ODMA0_AXI_SLVERR",
        "ODMA0_AXI_DECERR",
        "0x28 Reserved",
        "ODMA1_AXI_SLVERR",
        "ODMA1_AXI_DECERR",
};

void ccp_log_error(struct ccp_device *d, unsigned int e)
{
        if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
                return;

        if (e < ARRAY_SIZE(ccp_error_codes))
                dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
        else
                dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
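/* A minimal sketch of the reader-side acquire order described above
 * (illustrative only; ccp_get_device() below is the real user):
 *
 *      read_lock_irqsave(&ccp_unit_lock, flags);   unit lock first
 *      spin_lock(&ccp_rr_lock);                    then the RR lock
 *      ...advance the ccp_rr pointer...
 *      spin_unlock(&ccp_rr_lock);
 *      read_unlock_irqrestore(&ccp_unit_lock, flags);
 */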
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
static unsigned int ccp_increment_unit_ordinal(void)
{
        return atomic_inc_return(&ccp_unit_ordinal);
}

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        list_add_tail(&ccp->entry, &ccp_units);
        if (!ccp_rr)
                /* We already have the list lock (we're first) so this
                 * pointer can't change on us. Set its initial value.
                 */
                ccp_rr = ccp;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        if (ccp_rr == ccp) {
                /* ccp_unit_lock is read/write; any read access
                 * will be suspended while we make changes to the
                 * list and RR pointer.
                 */
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units,
                                                  struct ccp_device, entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
        }
        list_del(&ccp->entry);
        if (list_empty(&ccp_units))
                ccp_rr = NULL;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
        int ret = 0;

        dev_dbg(ccp->dev, "Registering RNG...\n");
        /* Register an RNG */
        ccp->hwrng.name = ccp->rngname;
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret)
                dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

        return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
        if (ccp->hwrng.name)
                hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
        unsigned long flags;
        struct ccp_device *dp = NULL;

        /* We round-robin through the unit list.
         * The (ccp_rr) pointer refers to the next unit to use.
         */
        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                spin_lock(&ccp_rr_lock);
                dp = ccp_rr;
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units,
                                                  struct ccp_device, entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
                spin_unlock(&ccp_rr_lock);
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&ccp_unit_lock, flags);
        ret = list_empty(&ccp_units);
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
        struct ccp_device *dp;
        unsigned long flags;
        int ret = 0;

        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                dp = list_first_entry(&ccp_units, struct ccp_device, entry);
                ret = dp->vdata->version;
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
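
/* Usage sketch (illustrative only, not part of this driver): a client
 * module can gate its own initialization on these two exports. The
 * function name below is hypothetical.
 *
 *      static int __init my_client_init(void)
 *      {
 *              if (ccp_present() != 0)
 *                      return -ENODEV;
 *              pr_info("found CCP v%u\n", ccp_version());
 *              return 0;
 *      }
 */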

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * accepted only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case it is placed on the backlog and -EBUSY is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp;
        unsigned long flags;
        unsigned int i;
        int ret;

        /* Some commands might need to be sent to a specific device */
        ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
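
/* Caller sketch (illustrative only; names prefixed "my_" are hypothetical):
 * supply a completion callback, opt in to backlogging, and treat both
 * -EINPROGRESS and -EBUSY as "successfully queued", per the kernel-doc
 * above.
 *
 *      static void my_callback(void *data, int err)
 *      {
 *              if (err && err != -EINPROGRESS)
 *                      pr_err("CCP operation failed: %d\n", err);
 *      }
 *
 *      static int my_submit(struct ccp_cmd *cmd, void *ctx)
 *      {
 *              int ret;
 *
 *              cmd->callback = my_callback;
 *              cmd->data = ctx;
 *              cmd->flags |= CCP_CMD_MAY_BACKLOG;
 *              ret = ccp_enqueue_cmd(cmd);
 *              if (ret == -EINPROGRESS || ret == -EBUSY)
 *                      return 0;
 *              return ret;
 *      }
 */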

static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);
        complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - kernel thread function for processing a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
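
/* For context, a sketch of how a per-queue thread is typically created
 * and woken (illustrative; the actual call site lives in the bus-specific
 * init code outside this file, and the exact form may differ):
 *
 *      cmd_q->kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
 *                                      "%s-q%u", ccp->name, cmd_q->id);
 *      if (IS_ERR(cmd_q->kthread))
 *              goto e_err;
 *      wake_up_process(cmd_q->kthread);
 */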

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
        if (!ccp)
                return NULL;
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->sb_mutex);
        ccp->sb_count = KSB_COUNT;
        ccp->sb_start = 0;

        ccp->ord = ccp_increment_unit_ordinal();
        snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
        snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

        return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /* Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
        int ret;

        ret = ccp_pci_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (ccp_present() != 0) {
                ccp_pci_exit();
                return -ENODEV;
        }

        return 0;
#endif

#ifdef CONFIG_ARM64
        int ret;

        ret = ccp_platform_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (ccp_present() != 0) {
                ccp_platform_exit();
                return -ENODEV;
        }

        return 0;
#endif

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
        ccp_pci_exit();
#endif

#ifdef CONFIG_ARM64
        ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);