Zain Wang | 433cd2c | 2015-11-25 13:43:32 +0800 | [diff] [blame] | 1 | /* |
| 2 | * Crypto acceleration support for Rockchip RK3288 |
| 3 | * |
| 4 | * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd |
| 5 | * |
| 6 | * Author: Zain Wang <zain.wang@rock-chips.com> |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms and conditions of the GNU General Public License, |
| 10 | * version 2, as published by the Free Software Foundation. |
| 11 | * |
| 12 | * Some ideas are from marvell-cesa.c and s5p-sss.c driver. |
| 13 | */ |
| 14 | |
| 15 | #include "rk3288_crypto.h" |
| 16 | #include <linux/module.h> |
| 17 | #include <linux/platform_device.h> |
| 18 | #include <linux/of.h> |
| 19 | #include <linux/clk.h> |
| 20 | #include <linux/crypto.h> |
| 21 | #include <linux/reset.h> |
| 22 | |
| 23 | static int rk_crypto_enable_clk(struct rk_crypto_info *dev) |
| 24 | { |
| 25 | int err; |
| 26 | |
| 27 | err = clk_prepare_enable(dev->sclk); |
| 28 | if (err) { |
| 29 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n", |
| 30 | __func__, __LINE__); |
| 31 | goto err_return; |
| 32 | } |
| 33 | err = clk_prepare_enable(dev->aclk); |
| 34 | if (err) { |
| 35 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n", |
| 36 | __func__, __LINE__); |
| 37 | goto err_aclk; |
| 38 | } |
| 39 | err = clk_prepare_enable(dev->hclk); |
| 40 | if (err) { |
| 41 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n", |
| 42 | __func__, __LINE__); |
| 43 | goto err_hclk; |
| 44 | } |
| 45 | err = clk_prepare_enable(dev->dmaclk); |
| 46 | if (err) { |
| 47 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n", |
| 48 | __func__, __LINE__); |
| 49 | goto err_dmaclk; |
| 50 | } |
| 51 | return err; |
| 52 | err_dmaclk: |
| 53 | clk_disable_unprepare(dev->hclk); |
| 54 | err_hclk: |
| 55 | clk_disable_unprepare(dev->aclk); |
| 56 | err_aclk: |
| 57 | clk_disable_unprepare(dev->sclk); |
| 58 | err_return: |
| 59 | return err; |
| 60 | } |
| 61 | |
/*
 * Disable the engine clocks in the exact reverse order of
 * rk_crypto_enable_clk(): dmaclk first, sclk last.
 */
static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}
| 69 | |
| 70 | static int check_alignment(struct scatterlist *sg_src, |
| 71 | struct scatterlist *sg_dst, |
| 72 | int align_mask) |
| 73 | { |
| 74 | int in, out, align; |
| 75 | |
| 76 | in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && |
| 77 | IS_ALIGNED((uint32_t)sg_src->length, align_mask); |
| 78 | if (!sg_dst) |
| 79 | return in; |
| 80 | out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && |
| 81 | IS_ALIGNED((uint32_t)sg_dst->length, align_mask); |
| 82 | align = in && out; |
| 83 | |
| 84 | return (align && (sg_src->length == sg_dst->length)); |
| 85 | } |
| 86 | |
/*
 * Prepare the next chunk of a request for DMA.
 *
 * Fast path: while every chunk seen so far is DMA-alignable (dev->aligned
 * stays true), map the caller's scatterlist entries directly and consume
 * up to one sg entry's worth of dev->left_bytes.
 *
 * Slow path: once alignment is lost it is never regained (dev->aligned
 * latches to false below); data is staged through the bounce buffer
 * dev->addr_vir one page at a time via sg_pcopy_to_buffer(), and the
 * temporary single-entry list dev->sg_tmp is mapped instead.
 *
 * Sets dev->addr_in (and dev->addr_out when sg_dst is non-NULL) to the DMA
 * addresses for this chunk and records its size in dev->count.  Returns 0
 * on success, -EINVAL/-ENOMEM on copy or mapping failure with any mapping
 * made so far undone.
 */
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	/* Once false, stays false: a single misaligned sg forces bounce
	 * buffering for the remainder of the request.
	 */
	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				/* Undo the src mapping before bailing out. */
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		/* Bounce buffer is one page; clamp the chunk accordingly. */
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		/* Gather the next chunk from the (possibly fragmented)
		 * source list into the linear bounce buffer; the offset is
		 * how much of the request has already been consumed.
		 */
		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			/* NOTE(review): the same sg_tmp entry is mapped a
			 * second time here, in the other direction, so the
			 * engine reads and writes the one bounce buffer.
			 * Double-mapping one buffer bidirectionally looks
			 * fragile w.r.t. the DMA API — verify on a
			 * non-coherent platform.
			 */
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}
| 154 | |
| 155 | static void rk_unload_data(struct rk_crypto_info *dev) |
| 156 | { |
| 157 | struct scatterlist *sg_in, *sg_out; |
| 158 | |
| 159 | sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; |
| 160 | dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); |
| 161 | |
| 162 | if (dev->sg_dst) { |
| 163 | sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; |
| 164 | dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); |
| 165 | } |
| 166 | } |
| 167 | |
/*
 * Interrupt handler.  @dev_id is the platform_device passed to
 * devm_request_irq(), so its drvdata must point at the rk_crypto_info
 * before this IRQ (shared!) can fire — NOTE(review): verify probe sets
 * drvdata before requesting the line.
 *
 * Reads and acknowledges (write-to-clear) the interrupt status, then
 * either reports a DMA error or advances the transfer via dev->update().
 * Any error terminates the request through dev->complete().
 */
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	int err = 0;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	/* Ack everything we saw. */
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
	/* 0x0a: presumably the DMA-error status bits, 0x05 the completion
	 * bits — TODO confirm bit meanings against the RK3288 TRM.
	 */
	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		err = -EFAULT;
	} else if (interrupt_status & 0x05) {
		err = dev->update(dev);
	}
	if (err)
		dev->complete(dev, err);
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
| 188 | |
/*
 * Tasklet body: pull the next request off the software queue and start it
 * on the hardware.  Queue manipulation is done under dev->lock with IRQs
 * disabled, since the same lock is taken from the interrupt handler.
 */
static void rk_crypto_tasklet_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!async_req) {
		dev_err(dev->dev, "async_req is NULL !!\n");
		return;
	}
	/* A backlogged request just moved into the main queue; tell its
	 * submitter it is now in progress.
	 */
	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	/* Only ablkcipher is handled here; hashes would need their own
	 * request cast.  dev->start() then kicks the hardware.
	 */
	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
		dev->ablk_req = ablkcipher_request_cast(async_req);
	err = dev->start(dev);
	if (err)
		dev->complete(dev, err);
}
| 215 | |
/* All cipher algorithms this driver exposes; registered in order by
 * rk_crypto_register().  The rk_*_alg objects are defined in the cipher
 * implementation files.
 */
static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
};
| 224 | |
| 225 | static int rk_crypto_register(struct rk_crypto_info *crypto_info) |
| 226 | { |
| 227 | unsigned int i, k; |
| 228 | int err = 0; |
| 229 | |
| 230 | for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { |
| 231 | rk_cipher_algs[i]->dev = crypto_info; |
| 232 | err = crypto_register_alg(&rk_cipher_algs[i]->alg); |
| 233 | if (err) |
| 234 | goto err_cipher_algs; |
| 235 | } |
| 236 | return 0; |
| 237 | |
| 238 | err_cipher_algs: |
| 239 | for (k = 0; k < i; k++) |
| 240 | crypto_unregister_alg(&rk_cipher_algs[k]->alg); |
| 241 | return err; |
| 242 | } |
| 243 | |
| 244 | static void rk_crypto_unregister(void) |
| 245 | { |
| 246 | unsigned int i; |
| 247 | |
| 248 | for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) |
| 249 | crypto_unregister_alg(&rk_cipher_algs[i]->alg); |
| 250 | } |
| 251 | |
/* devm cleanup action: put the engine back into reset when the driver is
 * unbound (registered via devm_add_action() in probe).
 */
static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}
| 258 | |
/* Device-tree match table: this driver binds only to the RK3288 engine. */
static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
| 264 | |
| 265 | static int rk_crypto_probe(struct platform_device *pdev) |
| 266 | { |
| 267 | struct resource *res; |
| 268 | struct device *dev = &pdev->dev; |
| 269 | struct rk_crypto_info *crypto_info; |
| 270 | int err = 0; |
| 271 | |
| 272 | crypto_info = devm_kzalloc(&pdev->dev, |
| 273 | sizeof(*crypto_info), GFP_KERNEL); |
| 274 | if (!crypto_info) { |
| 275 | err = -ENOMEM; |
| 276 | goto err_crypto; |
| 277 | } |
| 278 | |
| 279 | crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); |
| 280 | if (IS_ERR(crypto_info->rst)) { |
| 281 | err = PTR_ERR(crypto_info->rst); |
| 282 | goto err_crypto; |
| 283 | } |
| 284 | |
| 285 | reset_control_assert(crypto_info->rst); |
| 286 | usleep_range(10, 20); |
| 287 | reset_control_deassert(crypto_info->rst); |
| 288 | |
| 289 | err = devm_add_action(dev, rk_crypto_action, crypto_info); |
| 290 | if (err) { |
| 291 | reset_control_assert(crypto_info->rst); |
| 292 | goto err_crypto; |
| 293 | } |
| 294 | |
| 295 | spin_lock_init(&crypto_info->lock); |
| 296 | |
| 297 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 298 | crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); |
| 299 | if (IS_ERR(crypto_info->reg)) { |
| 300 | err = PTR_ERR(crypto_info->reg); |
| 301 | goto err_crypto; |
| 302 | } |
| 303 | |
| 304 | crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); |
| 305 | if (IS_ERR(crypto_info->aclk)) { |
| 306 | err = PTR_ERR(crypto_info->aclk); |
| 307 | goto err_crypto; |
| 308 | } |
| 309 | |
| 310 | crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); |
| 311 | if (IS_ERR(crypto_info->hclk)) { |
| 312 | err = PTR_ERR(crypto_info->hclk); |
| 313 | goto err_crypto; |
| 314 | } |
| 315 | |
| 316 | crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); |
| 317 | if (IS_ERR(crypto_info->sclk)) { |
| 318 | err = PTR_ERR(crypto_info->sclk); |
| 319 | goto err_crypto; |
| 320 | } |
| 321 | |
| 322 | crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); |
| 323 | if (IS_ERR(crypto_info->dmaclk)) { |
| 324 | err = PTR_ERR(crypto_info->dmaclk); |
| 325 | goto err_crypto; |
| 326 | } |
| 327 | |
| 328 | crypto_info->irq = platform_get_irq(pdev, 0); |
| 329 | if (crypto_info->irq < 0) { |
| 330 | dev_warn(crypto_info->dev, |
| 331 | "control Interrupt is not available.\n"); |
| 332 | err = crypto_info->irq; |
| 333 | goto err_crypto; |
| 334 | } |
| 335 | |
| 336 | err = devm_request_irq(&pdev->dev, crypto_info->irq, |
| 337 | rk_crypto_irq_handle, IRQF_SHARED, |
| 338 | "rk-crypto", pdev); |
| 339 | |
| 340 | if (err) { |
| 341 | dev_err(crypto_info->dev, "irq request failed.\n"); |
| 342 | goto err_crypto; |
| 343 | } |
| 344 | |
| 345 | crypto_info->dev = &pdev->dev; |
| 346 | platform_set_drvdata(pdev, crypto_info); |
| 347 | |
| 348 | tasklet_init(&crypto_info->crypto_tasklet, |
| 349 | rk_crypto_tasklet_cb, (unsigned long)crypto_info); |
| 350 | crypto_init_queue(&crypto_info->queue, 50); |
| 351 | |
| 352 | crypto_info->enable_clk = rk_crypto_enable_clk; |
| 353 | crypto_info->disable_clk = rk_crypto_disable_clk; |
| 354 | crypto_info->load_data = rk_load_data; |
| 355 | crypto_info->unload_data = rk_unload_data; |
| 356 | |
| 357 | err = rk_crypto_register(crypto_info); |
| 358 | if (err) { |
| 359 | dev_err(dev, "err in register alg"); |
| 360 | goto err_register_alg; |
| 361 | } |
| 362 | |
| 363 | dev_info(dev, "Crypto Accelerator successfully registered\n"); |
| 364 | return 0; |
| 365 | |
| 366 | err_register_alg: |
| 367 | tasklet_kill(&crypto_info->crypto_tasklet); |
| 368 | err_crypto: |
| 369 | return err; |
| 370 | } |
| 371 | |
| 372 | static int rk_crypto_remove(struct platform_device *pdev) |
| 373 | { |
| 374 | struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); |
| 375 | |
| 376 | rk_crypto_unregister(); |
| 377 | tasklet_kill(&crypto_tmp->crypto_tasklet); |
| 378 | return 0; |
| 379 | } |
| 380 | |
/* Platform driver glue; matched against the DT table above. */
static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");