/* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* QTI Over the Air (OTA) Crypto driver */
15
16#include <linux/types.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/kernel.h>
20#include <linux/dmapool.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/fs.h>
26#include <linux/miscdevice.h>
27#include <linux/uaccess.h>
28#include <linux/debugfs.h>
29#include <linux/cache.h>
30
31
32#include <linux/qcota.h>
33#include "qce.h"
34#include "qce_ota.h"
35
/* Operation types serviced by this driver; one per ioctl request kind. */
enum qce_ota_oper_enum {
	QCE_OTA_F8_OPER = 0,		/* single-packet F8 cipher */
	QCE_OTA_MPKT_F8_OPER = 1,	/* multi-packet F8, fixed packet size */
	QCE_OTA_F9_OPER = 2,		/* F9 integrity (MAC) */
	QCE_OTA_VAR_MPKT_F8_OPER = 3,	/* multi-packet F8, per-packet sizes */
	QCE_OTA_OPER_LAST
};
43
44struct ota_dev_control;
45
/*
 * One in-flight OTA request.  Allocated on the ioctl caller's stack for
 * the duration of the call; linked onto ota_dev_control.ready_commands
 * while waiting for a free engine.
 */
struct ota_async_req {
	struct list_head rlist;		/* link in podev->ready_commands */
	struct completion complete;	/* signalled when the request finishes */
	int err;			/* 0 or negative errno result */
	enum qce_ota_oper_enum op;	/* selects which member of 'req' is valid */
	union {
		struct qce_f9_req f9_req;
		struct qce_f8_req f8_req;
		struct qce_f8_multi_pkt_req f8_mp_req;
		struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
	} req;
	/* index of the next sub-packet for QCE_OTA_VAR_MPKT_F8_OPER */
	unsigned int steps;
	struct ota_qce_dev *pqce;	/* engine currently servicing this request */
};
60
61/*
62 * Register ourselves as a misc device to be able to access the ota
63 * from userspace.
64 */
65
66
67#define QCOTA_DEV "qcota"
68
69
/*
 * Driver-wide state: the single misc device exposed to userspace plus
 * the list of registered crypto engines and queued requests.
 */
struct ota_dev_control {

	/* misc device */
	struct miscdevice miscdevice;
	struct list_head ready_commands; /* requests waiting for an engine */
	unsigned int magic;		/* OTA_MAGIC; handle sanity check */
	struct list_head qce_dev;	/* list of ota_qce_dev engines */
	spinlock_t lock;		/* guards both lists and active_command */
	struct mutex register_lock;	/* serializes misc_register bookkeeping */
	bool registered;		/* misc device registered? */
	uint32_t total_units;		/* number of probed engines */
};
82
/* Per-engine (per platform device) state. */
struct ota_qce_dev {
	struct list_head qlist;		/* link in podev->qce_dev */
	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	struct ota_async_req *active_command; /* request in flight, or NULL */
	struct tasklet_struct done_tasklet;   /* runs req_done() */
	struct ota_dev_control *podev;	/* owning driver-wide state */
	uint32_t unit;			/* engine index, shown in debugfs */
	u64 total_req;			/* requests started on this engine */
	u64 err_req;			/* requests that failed on this engine */
};
98
99#define OTA_MAGIC 0x4f544143
100
/* Forward declarations: file operations and request-engine helpers. */
static long qcota_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
107
/* File operations backing the /dev/qcota0 misc device. */
static const struct file_operations qcota_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcota_ioctl,
	.open = qcota_open,
	.release = qcota_release,
};
114
/* Singleton driver state; the misc minor number is assigned dynamically. */
static struct ota_dev_control qcota_dev = {
	.miscdevice = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "qcota0",
		.fops = &qcota_fops,
	},
	.magic = OTA_MAGIC,
};
123
124#define DEBUG_MAX_FNAME 16
125#define DEBUG_MAX_RW_BUF 1024
126
/* Cumulative request/result counters exposed through debugfs. */
struct qcota_stat {
	u64 f8_req;		/* F8 ioctls received */
	u64 f8_mp_req;		/* F8 multi-packet ioctls received */
	u64 f8_v_mp_req;	/* F8 variable multi-packet ioctls received */
	u64 f9_req;		/* F9 ioctls received */
	u64 f8_op_success;
	u64 f8_op_fail;
	u64 f8_mp_op_success;
	u64 f8_mp_op_fail;
	u64 f8_v_mp_op_success;
	u64 f8_v_mp_op_fail;
	u64 f9_op_success;
	u64 f9_op_fail;
};
/* NOTE(review): counters are bumped in submit_req() without holding a
 * lock, so values are best-effort under concurrent ioctls. */
static struct qcota_stat _qcota_stat;
static struct dentry *_debug_dent;	/* debugfs directory "qcota" */
static char _debug_read_buf[DEBUG_MAX_RW_BUF]; /* formatted stats text */
static int _debug_qcota;		/* i_private cookie for the stats file */
145
/* Return the singleton device control block (never NULL). */
static struct ota_dev_control *qcota_control(void)
{

	return &qcota_dev;
}
151
152static int qcota_open(struct inode *inode, struct file *file)
153{
154 struct ota_dev_control *podev;
155
156 podev = qcota_control();
157 if (podev == NULL) {
158 pr_err("%s: no such device %d\n", __func__,
159 MINOR(inode->i_rdev));
160 return -ENOENT;
161 }
162
163 file->private_data = podev;
164
165 return 0;
166}
167
168static int qcota_release(struct inode *inode, struct file *file)
169{
170 struct ota_dev_control *podev;
171
172 podev = file->private_data;
173
174 if (podev != NULL && podev->magic != OTA_MAGIC) {
175 pr_err("%s: invalid handle %p\n",
176 __func__, podev);
177 }
178
179 file->private_data = NULL;
180
181 return 0;
182}
183
184static bool _next_v_mp_req(struct ota_async_req *areq)
185{
186 unsigned char *p;
187
188 if (areq->err)
189 return false;
190 if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
191 return false;
192
193 p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
194 p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
195 p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
196
197 areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
198 areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
199 areq->req.f8_v_mp_req.qce_f8_req.data_len =
200 areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
201
202 areq->req.f8_v_mp_req.qce_f8_req.count_c++;
203 return true;
204}
205
/*
 * Tasklet body, scheduled by f8_cb/f9_cb after an engine completes a
 * (sub-)request.  For a variable multi-packet F8 request it issues the
 * next sub-packet; otherwise it completes the finished request and, if
 * any commands are queued, starts the next one on this engine.
 */
static void req_done(unsigned long data)
{
	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
	struct ota_dev_control *podev = pqce->podev;
	struct ota_async_req *areq;
	unsigned long flags;
	struct ota_async_req *new_req = NULL;
	int ret = 0;
	bool schedule = true;

	spin_lock_irqsave(&podev->lock, flags);
	areq = pqce->active_command;
	if (unlikely(areq == NULL))
		pr_err("ota_crypto: req_done, no active request\n");
	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
		if (_next_v_mp_req(areq)) {
			/* execute next subcommand */
			spin_unlock_irqrestore(&podev->lock, flags);
			ret = start_req(pqce, areq);
			if (unlikely(ret)) {
				/* submission failed: fail the whole request,
				 * re-take the lock and fall through to
				 * schedule the next queued command */
				areq->err = ret;
				schedule = true;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				/* sub-request now in flight; do not complete
				 * the caller yet */
				areq = NULL;
				schedule = false;
			}
		} else {
			/* done with this variable mp req */
			schedule = true;
		}
	}
	/* lock is held whenever this loop is entered */
	while (schedule) {
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
						struct ota_async_req, rlist);
			list_del(&new_req->rlist);
			pqce->active_command = new_req;
			spin_unlock_irqrestore(&podev->lock, flags);

			new_req->err = 0;
			/* start a new request */
			ret = start_req(pqce, new_req);
			if (unlikely(new_req && ret)) {
				/* could not start: complete it with the error
				 * and loop to try the next queued command */
				new_req->err = ret;
				complete(&new_req->complete);
				ret = 0;
				new_req = NULL;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				schedule = false;
			}
		} else {
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
			schedule = false;
		};
	}
	/* complete the original request outside the lock */
	if (areq)
		complete(&areq->complete);
}
267
268static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
269 int ret)
270{
271 struct ota_async_req *areq = (struct ota_async_req *) cookie;
272 struct ota_qce_dev *pqce;
273
274 pqce = areq->pqce;
275 areq->req.f9_req.mac_i = *((uint32_t *)icv);
276
277 if (ret) {
278 pqce->err_req++;
279 areq->err = -ENXIO;
280 } else
281 areq->err = 0;
282
283 tasklet_schedule(&pqce->done_tasklet);
284}
285
286static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
287 int ret)
288{
289 struct ota_async_req *areq = (struct ota_async_req *) cookie;
290 struct ota_qce_dev *pqce;
291
292 pqce = areq->pqce;
293
294 if (ret) {
295 pqce->err_req++;
296 areq->err = -ENXIO;
297 } else {
298 areq->err = 0;
299 }
300
301 tasklet_schedule(&pqce->done_tasklet);
302}
303
304static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
305{
306 struct qce_f9_req *pf9;
307 struct qce_f8_multi_pkt_req *p_mp_f8;
308 struct qce_f8_req *pf8;
309 int ret = 0;
310
311 /* command should be on the podev->active_command */
312 areq->pqce = pqce;
313
314 switch (areq->op) {
315 case QCE_OTA_F8_OPER:
316 pf8 = &areq->req.f8_req;
317 ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
318 break;
319 case QCE_OTA_MPKT_F8_OPER:
320 p_mp_f8 = &areq->req.f8_mp_req;
321 ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
322 break;
323
324 case QCE_OTA_F9_OPER:
325 pf9 = &areq->req.f9_req;
326 ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
327 break;
328
329 case QCE_OTA_VAR_MPKT_F8_OPER:
330 pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
331 ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
332 break;
333
334 default:
335 ret = -ENOTSUPP;
336 break;
337 };
338 areq->err = ret;
339 pqce->total_req++;
340 if (ret)
341 pqce->err_req++;
342 return ret;
343}
344
345static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
346{
347 /* do this function with spinlock set */
348 struct ota_qce_dev *p;
349
350 if (unlikely(list_empty(&podev->qce_dev))) {
351 pr_err("%s: no valid qce to schedule\n", __func__);
352 return NULL;
353 }
354
355 list_for_each_entry(p, &podev->qce_dev, qlist) {
356 if (p->active_command == NULL)
357 return p;
358 }
359 return NULL;
360}
361
362static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
363{
364 unsigned long flags;
365 int ret = 0;
366 struct qcota_stat *pstat;
367 struct ota_qce_dev *pqce;
368
369 areq->err = 0;
370
371 spin_lock_irqsave(&podev->lock, flags);
372 pqce = schedule_qce(podev);
373 if (pqce) {
374 pqce->active_command = areq;
375 spin_unlock_irqrestore(&podev->lock, flags);
376
377 ret = start_req(pqce, areq);
378 if (ret != 0) {
379 spin_lock_irqsave(&podev->lock, flags);
380 pqce->active_command = NULL;
381 spin_unlock_irqrestore(&podev->lock, flags);
382 }
383
384 } else {
385 list_add_tail(&areq->rlist, &podev->ready_commands);
386 spin_unlock_irqrestore(&podev->lock, flags);
387 }
388
389 if (ret == 0)
390 wait_for_completion(&areq->complete);
391
392 pstat = &_qcota_stat;
393 switch (areq->op) {
394 case QCE_OTA_F8_OPER:
395 if (areq->err)
396 pstat->f8_op_fail++;
397 else
398 pstat->f8_op_success++;
399 break;
400
401 case QCE_OTA_MPKT_F8_OPER:
402
403 if (areq->err)
404 pstat->f8_mp_op_fail++;
405 else
406 pstat->f8_mp_op_success++;
407 break;
408
409 case QCE_OTA_F9_OPER:
410 if (areq->err)
411 pstat->f9_op_fail++;
412 else
413 pstat->f9_op_success++;
414 break;
415 case QCE_OTA_VAR_MPKT_F8_OPER:
416 default:
417 if (areq->err)
418 pstat->f8_v_mp_op_fail++;
419 else
420 pstat->f8_v_mp_op_success++;
421 break;
422 };
423
424 return areq->err;
425}
426
427static long qcota_ioctl(struct file *file,
428 unsigned int cmd, unsigned long arg)
429{
430 int err = 0;
431 struct ota_dev_control *podev;
432 uint8_t *user_src;
433 uint8_t *user_dst;
434 uint8_t *k_buf = NULL;
435 struct ota_async_req areq;
436 uint32_t total, temp;
437 struct qcota_stat *pstat;
438 int i;
439 uint8_t *p = NULL;
440
441 podev = file->private_data;
442 if (podev == NULL || podev->magic != OTA_MAGIC) {
443 pr_err("%s: invalid handle %p\n",
444 __func__, podev);
445 return -ENOENT;
446 }
447
448 /* Verify user arguments. */
449 if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
450 return -ENOTTY;
451
452 init_completion(&areq.complete);
453
454 pstat = &_qcota_stat;
455
456 switch (cmd) {
457 case QCOTA_F9_REQ:
458 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
459 sizeof(struct qce_f9_req)))
460 return -EFAULT;
461 if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
462 sizeof(struct qce_f9_req)))
463 return -EFAULT;
464
465 user_src = areq.req.f9_req.message;
466 if (!access_ok(VERIFY_READ, (void __user *)user_src,
467 areq.req.f9_req.msize))
468 return -EFAULT;
469
470 if (areq.req.f9_req.msize == 0)
471 return 0;
472 k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
473 if (k_buf == NULL)
474 return -ENOMEM;
475
476 if (__copy_from_user(k_buf, (void __user *)user_src,
477 areq.req.f9_req.msize)) {
478 kfree(k_buf);
479 return -EFAULT;
480 }
481
482 areq.req.f9_req.message = k_buf;
483 areq.op = QCE_OTA_F9_OPER;
484
485 pstat->f9_req++;
486 err = submit_req(&areq, podev);
487
488 areq.req.f9_req.message = user_src;
489 if (err == 0 && __copy_to_user((void __user *)arg,
490 &areq.req.f9_req, sizeof(struct qce_f9_req))) {
491 err = -EFAULT;
492 }
493 kfree(k_buf);
494 break;
495
496 case QCOTA_F8_REQ:
497 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
498 sizeof(struct qce_f8_req)))
499 return -EFAULT;
500 if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
501 sizeof(struct qce_f8_req)))
502 return -EFAULT;
503 total = areq.req.f8_req.data_len;
504 user_src = areq.req.f8_req.data_in;
505 if (user_src != NULL) {
506 if (!access_ok(VERIFY_READ, (void __user *)
507 user_src, total))
508 return -EFAULT;
509
510 };
511
512 user_dst = areq.req.f8_req.data_out;
513 if (!access_ok(VERIFY_WRITE, (void __user *)
514 user_dst, total))
515 return -EFAULT;
516
517 if (!total)
518 return 0;
519 k_buf = kmalloc(total, GFP_KERNEL);
520 if (k_buf == NULL)
521 return -ENOMEM;
522
523 /* k_buf returned from kmalloc should be cache line aligned */
524 if (user_src && __copy_from_user(k_buf,
525 (void __user *)user_src, total)) {
526 kfree(k_buf);
527 return -EFAULT;
528 }
529
530 if (user_src)
531 areq.req.f8_req.data_in = k_buf;
532 else
533 areq.req.f8_req.data_in = NULL;
534 areq.req.f8_req.data_out = k_buf;
535
536 areq.op = QCE_OTA_F8_OPER;
537
538 pstat->f8_req++;
539 err = submit_req(&areq, podev);
540
541 if (err == 0 && __copy_to_user(user_dst, k_buf, total))
542 err = -EFAULT;
543 kfree(k_buf);
544
545 break;
546
547 case QCOTA_F8_MPKT_REQ:
548 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
549 sizeof(struct qce_f8_multi_pkt_req)))
550 return -EFAULT;
551 if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
552 sizeof(struct qce_f8_multi_pkt_req)))
553 return -EFAULT;
554 temp = areq.req.f8_mp_req.qce_f8_req.data_len;
555 if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
556 areq.req.f8_mp_req.cipher_size)
557 return -EINVAL;
558 total = (uint32_t) areq.req.f8_mp_req.num_pkt *
559 areq.req.f8_mp_req.qce_f8_req.data_len;
560
561 user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
562 if (!access_ok(VERIFY_READ, (void __user *)
563 user_src, total))
564 return -EFAULT;
565
566 user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
567 if (!access_ok(VERIFY_WRITE, (void __user *)
568 user_dst, total))
569 return -EFAULT;
570
571 if (!total)
572 return 0;
573 k_buf = kmalloc(total, GFP_KERNEL);
574 if (k_buf == NULL)
575 return -ENOMEM;
576 /* k_buf returned from kmalloc should be cache line aligned */
577 if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
578 kfree(k_buf);
579
580 return -EFAULT;
581 }
582
583 areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
584 areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
585
586 areq.op = QCE_OTA_MPKT_F8_OPER;
587
588 pstat->f8_mp_req++;
589 err = submit_req(&areq, podev);
590
591 if (err == 0 && __copy_to_user(user_dst, k_buf, total))
592 err = -EFAULT;
593 kfree(k_buf);
594 break;
595
596 case QCOTA_F8_V_MPKT_REQ:
597 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
598 sizeof(struct qce_f8_varible_multi_pkt_req)))
599 return -EFAULT;
600 if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
601 sizeof(struct qce_f8_varible_multi_pkt_req)))
602 return -EFAULT;
603
604 if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
605 return -EINVAL;
606
607 for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
608 if (!access_ok(VERIFY_WRITE, (void __user *)
609 areq.req.f8_v_mp_req.cipher_iov[i].addr,
610 areq.req.f8_v_mp_req.cipher_iov[i].size))
611 return -EFAULT;
612 total += areq.req.f8_v_mp_req.cipher_iov[i].size;
613 total = ALIGN(total, L1_CACHE_BYTES);
614 }
615
616 if (!total)
617 return 0;
618 k_buf = kmalloc(total, GFP_KERNEL);
619 if (k_buf == NULL)
620 return -ENOMEM;
621
622 for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
623 user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
624 if (__copy_from_user(p, (void __user *)user_src,
625 areq.req.f8_v_mp_req.cipher_iov[i].size)) {
626 kfree(k_buf);
627 return -EFAULT;
628 }
629 p += areq.req.f8_v_mp_req.cipher_iov[i].size;
630 p = (uint8_t *) ALIGN(((uintptr_t)p),
631 L1_CACHE_BYTES);
632 }
633
634 areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
635 areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
636 areq.req.f8_v_mp_req.qce_f8_req.data_len =
637 areq.req.f8_v_mp_req.cipher_iov[0].size;
638 areq.steps = 0;
639 areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
640
641 pstat->f8_v_mp_req++;
642 err = submit_req(&areq, podev);
643
644 if (err != 0) {
645 kfree(k_buf);
646 return err;
647 }
648
649 for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
650 user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
651 if (__copy_to_user(user_dst, p,
652 areq.req.f8_v_mp_req.cipher_iov[i].size)) {
653 kfree(k_buf);
654 return -EFAULT;
655 }
656 p += areq.req.f8_v_mp_req.cipher_iov[i].size;
657 p = (uint8_t *) ALIGN(((uintptr_t)p),
658 L1_CACHE_BYTES);
659 }
660 kfree(k_buf);
661 break;
662 default:
663 return -ENOTTY;
664 }
665
666 return err;
667}
668
669static int qcota_probe(struct platform_device *pdev)
670{
671 void *handle = NULL;
672 int rc = 0;
673 struct ota_dev_control *podev;
674 struct ce_hw_support ce_support;
675 struct ota_qce_dev *pqce;
676 unsigned long flags;
677
678 podev = &qcota_dev;
679 pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
680 if (!pqce) {
681 pr_err("qcota_probe: Memory allocation FAIL\n");
682 return -ENOMEM;
683 }
684
685 pqce->podev = podev;
686 pqce->active_command = NULL;
687 tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
688
689 /* open qce */
690 handle = qce_open(pdev, &rc);
691 if (handle == NULL) {
692 pr_err("%s: device %s, can not open qce\n",
693 __func__, pdev->name);
694 goto err;
695 }
696 if (qce_hw_support(handle, &ce_support) < 0 ||
697 ce_support.ota == false) {
698 pr_err("%s: device %s, qce does not support ota capability\n",
699 __func__, pdev->name);
700 rc = -ENODEV;
701 goto err;
702 }
703 pqce->qce = handle;
704 pqce->pdev = pdev;
705 pqce->total_req = 0;
706 pqce->err_req = 0;
707 platform_set_drvdata(pdev, pqce);
708
709 mutex_lock(&podev->register_lock);
710 rc = 0;
711 if (podev->registered == false) {
712 rc = misc_register(&podev->miscdevice);
713 if (rc == 0) {
714 pqce->unit = podev->total_units;
715 podev->total_units++;
716 podev->registered = true;
717 };
718 } else {
719 pqce->unit = podev->total_units;
720 podev->total_units++;
721 }
722 mutex_unlock(&podev->register_lock);
723 if (rc) {
724 pr_err("ion: failed to register misc device.\n");
725 goto err;
726 }
727
728 spin_lock_irqsave(&podev->lock, flags);
729 list_add_tail(&pqce->qlist, &podev->qce_dev);
730 spin_unlock_irqrestore(&podev->lock, flags);
731
732 return 0;
733err:
734 if (handle)
735 qce_close(handle);
736
737 platform_set_drvdata(pdev, NULL);
738 tasklet_kill(&pqce->done_tasklet);
739 kfree(pqce);
740 return rc;
741}
742
/*
 * Remove one engine: close its qce handle, unlink it from the
 * scheduler list, and deregister the misc device when the last engine
 * goes away.
 */
static int qcota_remove(struct platform_device *pdev)
{
	struct ota_dev_control *podev;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	pqce = platform_get_drvdata(pdev);
	if (!pqce)
		return 0;
	if (pqce->qce)
		qce_close(pqce->qce);

	podev = pqce->podev;
	if (!podev)
		goto ret;

	spin_lock_irqsave(&podev->lock, flags);
	list_del(&pqce->qlist);
	spin_unlock_irqrestore(&podev->lock, flags);

	mutex_lock(&podev->register_lock);
	if (--podev->total_units == 0) {
		/* NOTE(review): assumes misc_register() replaced the
		 * MISC_DYNAMIC_MINOR placeholder with a real minor, so a
		 * non-placeholder value means registration happened —
		 * verify against misc_register() semantics */
		if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
			misc_deregister(&podev->miscdevice);
		podev->registered = false;
	}
	mutex_unlock(&podev->register_lock);
ret:

	tasklet_kill(&pqce->done_tasklet);
	kfree(pqce);
	return 0;
}
776
/* Device-tree match table for OTA crypto engines. */
static const struct of_device_id qcota_match[] = {
	{	.compatible = "qcom,qcota",
	},
	{}
};
782
/* Platform driver binding one ota_qce_dev per matched device. */
static struct platform_driver qcota_plat_driver = {
	.probe = qcota_probe,
	.remove = qcota_remove,
	.driver = {
		.name = "qcota",
		.owner = THIS_MODULE,
		.of_match_table = qcota_match,
	},
};
792
/*
 * Format the global and per-engine counters into _debug_read_buf.
 * Returns the number of bytes written (always < DEBUG_MAX_RW_BUF).
 */
static int _disp_stats(void)
{
	struct qcota_stat *pstat;
	int len = 0;
	struct ota_dev_control *podev = &qcota_dev;
	unsigned long flags;
	struct ota_qce_dev *p;

	pstat = &_qcota_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI OTA crypto accelerator Statistics:\n");

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 request                      : %llu\n",
					pstat->f8_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation success            : %llu\n",
					pstat->f8_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation fail               : %llu\n",
					pstat->f8_op_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP request                   : %llu\n",
					pstat->f8_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation success         : %llu\n",
					pstat->f8_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation fail            : %llu\n",
					pstat->f8_mp_op_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP request          : %llu\n",
					pstat->f8_v_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation success: %llu\n",
					pstat->f8_v_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation fail   : %llu\n",
					pstat->f8_v_mp_op_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 request                      : %llu\n",
					pstat->f9_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation success            : %llu\n",
					pstat->f9_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation fail               : %llu\n",
					pstat->f9_op_fail);

	/* per-engine counters are read under the scheduler lock */
	spin_lock_irqsave(&podev->lock, flags);

	list_for_each_entry(p, &podev->qce_dev, qlist) {
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req                 : %llu\n",
			p->unit,
			p->total_req
		);
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req Error           : %llu\n",
			p->unit,
			p->err_req
		);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	return len;
}
868
/* debugfs open: expose the inode's private cookie to read/write. */
static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
874
/*
 * debugfs read: regenerate the statistics text and serve it from
 * _debug_read_buf.  Returns -EINVAL when the caller's buffer is
 * smaller than the formatted text.
 */
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	int rc = -EINVAL;
	int len;

	len = _disp_stats();
	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);

	return rc;
}
888
889static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
890 size_t count, loff_t *ppos)
891{
892 struct ota_dev_control *podev = &qcota_dev;
893 unsigned long flags;
894 struct ota_qce_dev *p;
895
896 memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
897
898 spin_lock_irqsave(&podev->lock, flags);
899
900 list_for_each_entry(p, &podev->qce_dev, qlist) {
901 p->total_req = 0;
902 p->err_req = 0;
903 }
904
905 spin_unlock_irqrestore(&podev->lock, flags);
906
907 return count;
908}
909
/* File operations for the debugfs "stats-0" node. */
static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
915
916static int _qcota_debug_init(void)
917{
918 int rc;
919 char name[DEBUG_MAX_FNAME];
920 struct dentry *dent;
921
922 _debug_dent = debugfs_create_dir("qcota", NULL);
923 if (IS_ERR(_debug_dent)) {
924 pr_err("qcota debugfs_create_dir fail, error %ld\n",
925 PTR_ERR(_debug_dent));
926 return PTR_ERR(_debug_dent);
927 }
928
929 snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
930 _debug_qcota = 0;
931 dent = debugfs_create_file(name, 0644, _debug_dent,
932 &_debug_qcota, &_debug_stats_ops);
933 if (dent == NULL) {
934 pr_err("qcota debugfs_create_file fail, error %ld\n",
935 PTR_ERR(dent));
936 rc = PTR_ERR(dent);
937 goto err;
938 }
939 return 0;
940err:
941 debugfs_remove_recursive(_debug_dent);
942 return rc;
943}
944
945static int __init qcota_init(void)
946{
947 int rc;
948 struct ota_dev_control *podev;
949
950 rc = _qcota_debug_init();
951 if (rc)
952 return rc;
953
954 podev = &qcota_dev;
955 INIT_LIST_HEAD(&podev->ready_commands);
956 INIT_LIST_HEAD(&podev->qce_dev);
957 spin_lock_init(&podev->lock);
958 mutex_init(&podev->register_lock);
959 podev->registered = false;
960 podev->total_units = 0;
961
962 return platform_driver_register(&qcota_plat_driver);
963}
/* Module exit: tear down debugfs, then unregister the driver. */
static void __exit qcota_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcota_plat_driver);
}
969
/* Module metadata and entry points. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Ota Crypto driver");

module_init(qcota_init);
module_exit(qcota_exit);