/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
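
/* Both parameters are read-only at runtime (mode 0444) and are
 * consulted once, at algorithm registration time, e.g. (assuming the
 * module is named ccp_crypto): modprobe ccp_crypto sha_disable=1
 */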

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received. With multiple queues available, the CCP can process more
 * than one cmd at a time. Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
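/* Backlogged cmds are kept on the cmds list itself; the backlog
 * pointer marks the oldest backlogged entry and points back at the
 * list head when nothing is backlogged.
 */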
struct ccp_crypto_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;

struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;
};

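/* Note: ccp_crypto_cpu is not referenced anywhere in this file; it
 * appears to be left over from the earlier per-CPU queuing scheme.
 */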
struct ccp_crypto_cpu {
        struct work_struct work;
        struct completion completion;
        struct ccp_crypto_cmd *crypto_cmd;
        int err;
};

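/* -EINPROGRESS and -EBUSY indicate a cmd that was accepted for
 * asynchronous processing; only other non-zero values are treated as
 * real errors.
 */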
static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

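/* Remove a completed cmd from the request queue, report through
 * @backlog any cmd that has just moved off the backlog, and return
 * the next queued cmd (if any) that targets the same tfm and can
 * therefore now be submitted to the CCP.
 */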
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cmd *held = NULL, *tmp;
        unsigned long flags;

        *backlog = NULL;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Held cmds will be after the current cmd in the queue, so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         * Because cmds can be executed from any point in the cmd list,
         * special precautions have to be taken when handling the backlog.
         */
        if (req_queue.backlog != &req_queue.cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;

                *backlog = container_of(req_queue.backlog,
                                        struct ccp_crypto_cmd, entry);
                req_queue.backlog = req_queue.backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        req_queue.cmd_count--;
        list_del(&crypto_cmd->entry);

        spin_unlock_irqrestore(&req_queue_lock, flags);

        return held;
}

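/* Completion callback invoked by the CCP driver when a cmd finishes
 * (or transitions from backlogged to in-progress). Runs the crypto
 * API completion callbacks and submits the next held cmd, if any,
 * for the same tfm.
 */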
static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
        int ret;

        if (err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        req->complete(req, -EINPROGRESS);
                }

                return;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                backlog->req->complete(backlog->req, -EINPROGRESS);
        }

        /* Transition the state from -EBUSY to -EINPROGRESS first */
        if (crypto_cmd->ret == -EBUSY)
                req->complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        req->complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                /* Since we have already queued the cmd, we must indicate that
                 * we can backlog so as not to "lose" this request.
                 */
                held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                ctx = crypto_tfm_ctx(held->req->tfm);
                if (ctx->complete)
                        ret = ctx->complete(held->req, ret);
                held->req->complete(held->req, ret);

                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        backlog->req->complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);
}

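/* Add a cmd to the request queue and, if no earlier cmd for the same
 * tfm is pending, submit it to the CCP. A cmd that cannot be queued
 * (queue full, or submission failed without backlog support) is freed
 * before returning.
 */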
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cmd *active = NULL, *tmp;
        unsigned long flags;
        bool free_cmd = true;
        int ret;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Check if the cmd can/should be queued */
        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_lock;
        }

        /* Look for an entry with the same tfm. If there is a cmd
         * with the same tfm in the list then the current cmd cannot
         * be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_lock;	/* Error, don't queue it */
                if ((ret == -EBUSY) &&
                    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_lock;	/* Not backlogging, don't queue it */
        }

        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (req_queue.backlog == &req_queue.cmds)
                        req_queue.backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        req_queue.cmd_count++;
        list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

        free_cmd = false;

e_lock:
        spin_unlock_irqrestore(&req_queue_lock, flags);

        if (free_cmd)
                kfree(crypto_cmd);

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 *
 * Return: -EINPROGRESS or -EBUSY per the crypto API async convention,
 * -ENOMEM if the tracking structure could not be allocated, or another
 * negative error code on submission failure.
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced from the
         * crypto_async_request (req) pointer because it is used after
         * completion callback for the request and the req pointer
         * might not be valid anymore.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        return ccp_crypto_enqueue_cmd(crypto_cmd);
}

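/* Append the entries of @sg_add to the first unused (no page set)
 * entries of @table; @table must have enough free entries or the
 * BUG_ON()s below will fire. Returns the last entry written.
 */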
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        BUG_ON(!sg);

        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        BUG_ON(sg_add);

        return sg_last;
}

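/* Register the AES and SHA algorithm implementations with the crypto
 * API, honoring the aes_disable/sha_disable module parameters. Each
 * registration helper adds its algorithms to the corresponding list
 * for later cleanup.
 */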
static int ccp_register_algs(void)
{
        int ret;

        if (!aes_disable) {
                ret = ccp_register_aes_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_cmac_algs(&hash_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_xts_algs(&cipher_algs);
                if (ret)
                        return ret;
        }

        if (!sha_disable) {
                ret = ccp_register_sha_algs(&hash_algs);
                if (ret)
                        return ret;
        }

        return 0;
}

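/* Unregister and free every algorithm that was added to the hash and
 * cipher lists by ccp_register_algs().
 */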
static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
                crypto_unregister_alg(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }
}

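/* Module init: bail out early if no CCP device is present, set up the
 * shared request queue, then register the supported algorithms.
 */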
static int ccp_crypto_init(void)
{
        int ret;

        ret = ccp_present();
        if (ret)
                return ret;

        spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
        req_queue.cmd_count = 0;

        ret = ccp_register_algs();
        if (ret)
                ccp_unregister_algs();

        return ret;
}

static void ccp_crypto_exit(void)
{
        ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);