/*
 * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * PFK Key Cache
 *
 * Key cache used internally in PFK.
 * The purpose of the cache is to save access time to QSEE when loading keys.
 * Currently the cache is the same size as the total number of keys that can
 * be loaded to ICE. Since this number is relatively small, the cache
 * eviction algorithm is simple and linear, based on the last-usage
 * timestamp: the entry that gets evicted is the one with the oldest
 * timestamp. Empty entries always have the oldest timestamp.
 */
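
/*
 * Illustrative example (added for clarity, not part of the original comment):
 * with entries in states {FREE, INACTIVE ts=100, INACTIVE ts=300}, a new key
 * takes the FREE slot first; once no FREE slots remain, the INACTIVE entry
 * with ts=100 (the oldest) is evicted. Entries in any ACTIVE_* state are
 * never evicted (see kc_find_oldest_entry_non_locked()).
 */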

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/ice.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/sched.h>

#include "pfk_kc.h"
#include "pfk_ice.h"


/** the first available index in the ICE engine */
#define PFK_KC_STARTING_INDEX 2

/** currently the only supported key and salt sizes */
#define PFK_KC_KEY_SIZE 32
#define PFK_KC_SALT_SIZE 32

/** Table size */
/* TODO: replace with a constant from ice.h */
#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))

/** The maximum key and salt size */
#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
#define PFK_UFS "ufs"

static DEFINE_SPINLOCK(kc_lock);
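/* IRQ flags saved by kc_spin_lock(). The value is only written after kc_lock
 * is acquired and read before kc_lock is released, so a single global copy
 * is assumed to be sufficient here.
 */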
static unsigned long flags;
static bool kc_ready;
static char *s_type = "sdcc";

/**
 * enum pfk_kc_entry_state - state of an entry inside the kc table
 *
 * @FREE: entry is free
 * @ACTIVE_ICE_PRELOAD: entry is actively used by the ICE engine and cannot
 *			be used by others. The SCM call to load the key to
 *			ICE is still pending
 * @ACTIVE_ICE_LOADED: entry is actively used by the ICE engine and cannot
 *			be used by others. The SCM call to load the key to
 *			ICE was successfully executed and the key is now
 *			loaded
 * @INACTIVE_INVALIDATING: entry is being invalidated during file close and
 *			cannot be used by others until the invalidation is
 *			complete
 * @INACTIVE: entry's key is already loaded, but it is not currently being
 *			used. It can be re-used for optimization and to avoid
 *			the SCM call cost, or it can be taken by another key
 *			if there are no FREE entries
 * @SCM_ERROR: an error occurred while the SCM call to load the key to ICE
 *			was performed
 */
enum pfk_kc_entry_state {
	FREE,
	ACTIVE_ICE_PRELOAD,
	ACTIVE_ICE_LOADED,
	INACTIVE_INVALIDATING,
	INACTIVE,
	SCM_ERROR
};
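
/*
 * State transitions, as implemented below (summary added for clarity):
 *
 *   FREE/INACTIVE         -> ACTIVE_ICE_PRELOAD     new key picked, before SCM call
 *   ACTIVE_ICE_PRELOAD    -> ACTIVE_ICE_LOADED      SCM call succeeded
 *   ACTIVE_ICE_PRELOAD    -> SCM_ERROR              SCM call failed
 *   INACTIVE              -> ACTIVE_ICE_LOADED      cache hit, no SCM call needed
 *   ACTIVE_ICE_LOADED     -> INACTIVE               last pfk_kc_load_key_end()
 *   FREE/INACTIVE         -> INACTIVE_INVALIDATING  kc_entry_start_invalidating()
 *   INACTIVE_INVALIDATING -> FREE                   kc_entry_finish_invalidating()
 *   SCM_ERROR             -> FREE                   error reported back to the caller
 */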

struct kc_entry {
	unsigned char key[PFK_MAX_KEY_SIZE];
	size_t key_size;

	unsigned char salt[PFK_MAX_SALT_SIZE];
	size_t salt_size;

	u64 time_stamp;
	u32 key_index;

	struct task_struct *thread_pending;

	enum pfk_kc_entry_state state;

	/* ref count for the number of requests in the HW queue for this key */
	int loaded_ref_cnt;
	int scm_error;
};

static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];

/**
 * kc_is_ready() - driver is initialized and ready.
 *
 * Return: true if the key cache is ready.
 */
static inline bool kc_is_ready(void)
{
	return kc_ready;
}

static inline void kc_spin_lock(void)
{
	spin_lock_irqsave(&kc_lock, flags);
}

static inline void kc_spin_unlock(void)
{
	spin_unlock_irqrestore(&kc_lock, flags);
}

/**
 * pfk_kc_get_storage_type() - return the hardware storage type.
 *
 * Return: storage type queried during bootup.
 */
const char *pfk_kc_get_storage_type(void)
{
	return s_type;
}

/**
 * kc_entry_is_available() - checks whether the entry is available
 *
 * Return true if it is, false otherwise (including when the entry is invalid).
 * Should be invoked under spinlock
 */
static bool kc_entry_is_available(const struct kc_entry *entry)
{
	if (!entry)
		return false;

	return (entry->state == FREE || entry->state == INACTIVE);
}

/**
 * kc_entry_wait_till_available() - waits till the entry is available
 *
 * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
 * by a signal
 *
 * Should be invoked under spinlock
 */
static int kc_entry_wait_till_available(struct kc_entry *entry)
{
	int res = 0;

	while (!kc_entry_is_available(entry)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			res = -ERESTARTSYS;
			break;
		}
		/* assuming only one thread can try to invalidate
		 * the same entry
		 */
		entry->thread_pending = current;
		kc_spin_unlock();
		schedule();
		kc_spin_lock();
	}
	set_current_state(TASK_RUNNING);

	return res;
}

/**
 * kc_entry_start_invalidating() - moves the entry to state
 *                                 INACTIVE_INVALIDATING.
 *                                 If the entry is in use, waits until
 *                                 it becomes available.
 * @entry: pointer to entry
 *
 * Return 0 in case of success, otherwise error
 * Should be invoked under spinlock
 */
static int kc_entry_start_invalidating(struct kc_entry *entry)
{
	int res;

	res = kc_entry_wait_till_available(entry);
	if (res)
		return res;

	entry->state = INACTIVE_INVALIDATING;

	return 0;
}

/**
 * kc_entry_finish_invalidating() - moves the entry to state FREE once
 *                                  invalidation is complete
 *
 * @entry: pointer to entry
 *
 * Should be invoked under spinlock
 */
static void kc_entry_finish_invalidating(struct kc_entry *entry)
{
	if (!entry)
		return;

	if (entry->state != INACTIVE_INVALIDATING)
		return;

	entry->state = FREE;
}

/**
 * kc_min_entry() - compare two entries to find the one with the older timestamp
 * @a: pointer to the first entry. If NULL, the other entry will be returned
 * @b: pointer to the second entry
 *
 * Return the entry whose timestamp is minimal, or b if a is NULL
 */
static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
		struct kc_entry *b)
{
	if (!a)
		return b;

	if (time_before64(b->time_stamp, a->time_stamp))
		return b;

	return a;
}

/**
 * kc_entry_at_index() - return entry at specific index
 * @index: index of entry to be accessed
 *
 * Return entry
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_entry_at_index(int index)
{
	return &(kc_table[index]);
}

/**
 * kc_find_key_at_index() - find a kc entry starting at a specific index
 * @key: key to look for
 * @key_size: the key size
 * @salt: salt to look for
 * @salt_size: the salt size
 * @starting_index: index to start the search from; if an entry is found, it is
 *                  updated with the index of that entry
 *
 * Return the entry, or NULL if no matching entry was found
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
	size_t key_size, const unsigned char *salt, size_t salt_size,
	int *starting_index)
{
	struct kc_entry *entry = NULL;
	int i = 0;

	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);

		if (salt != NULL) {
			if (entry->salt_size != salt_size)
				continue;

			if (memcmp(entry->salt, salt, salt_size) != 0)
				continue;
		}

		if (entry->key_size != key_size)
			continue;

		if (memcmp(entry->key, key, key_size) == 0) {
			*starting_index = i;
			return entry;
		}
	}

	return NULL;
}

/**
 * kc_find_key() - find a kc entry
 * @key: key to look for
 * @key_size: the key size
 * @salt: salt to look for
 * @salt_size: the salt size
 *
 * Return the entry, or NULL if no matching entry was found
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	int index = 0;

	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
}

/**
 * kc_find_oldest_entry_non_locked() - finds the entry with the minimal
 *                                     timestamp that is not locked
 *
 * Returns the entry with the minimal timestamp. Empty entries have a
 * timestamp of 0, therefore they are returned first.
 * If all the entries are locked, NULL is returned.
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_oldest_entry_non_locked(void)
{
	struct kc_entry *curr_min_entry = NULL;
	struct kc_entry *entry = NULL;
	int i = 0;

	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);

		if (entry->state == FREE)
			return entry;

		if (entry->state == INACTIVE)
			curr_min_entry = kc_min_entry(curr_min_entry, entry);
	}

	return curr_min_entry;
}

/**
 * kc_update_timestamp() - updates timestamp of entry to current
 *
 * @entry: entry to update
 */
static void kc_update_timestamp(struct kc_entry *entry)
{
	if (!entry)
		return;

	entry->time_stamp = get_jiffies_64();
}

/**
 * kc_clear_entry() - clear the key from entry and mark entry not in use
 *
 * @entry: pointer to entry
 *
 * Should be invoked under spinlock
 */
static void kc_clear_entry(struct kc_entry *entry)
{
	if (!entry)
		return;

	memset(entry->key, 0, entry->key_size);
	memset(entry->salt, 0, entry->salt_size);

	entry->key_size = 0;
	entry->salt_size = 0;

	entry->time_stamp = 0;
	entry->scm_error = 0;

	entry->state = FREE;

	entry->loaded_ref_cnt = 0;
	entry->thread_pending = NULL;
}

/**
 * kc_update_entry() - replaces the key in the given entry and
 *                     loads the new key to ICE
 *
 * @entry: entry to replace the key in
 * @key: key
 * @key_size: key_size
 * @salt: salt
 * @salt_size: salt_size
 * @data_unit: dun size
 * @ice_rev: ICE revision
 *
 * The previous key is securely released and wiped, the new one is loaded
 * to ICE.
 * Should be invoked under spinlock
 */
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
	size_t key_size, const unsigned char *salt, size_t salt_size,
	unsigned int data_unit, int ice_rev)
{
	int ret;

	kc_clear_entry(entry);

	memcpy(entry->key, key, key_size);
	entry->key_size = key_size;

	memcpy(entry->salt, salt, salt_size);
	entry->salt_size = salt_size;

	/* Mark entry as no longer free before releasing the lock */
	entry->state = ACTIVE_ICE_PRELOAD;

	/* QSEE calls cannot be made in atomic context, so drop the lock */
	kc_spin_unlock();

	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
			entry->salt, s_type, data_unit, ice_rev);

	kc_spin_lock();
	return ret;
}

/**
 * pfk_kc_init() - init function
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_init(void)
{
	int i = 0;
	struct kc_entry *entry = NULL;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		entry->key_index = PFK_KC_STARTING_INDEX + i;
	}
	kc_ready = true;
	kc_spin_unlock();

	return 0;
}

/**
 * pfk_kc_deinit() - deinit function
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_deinit(void)
{
	int res = pfk_kc_clear();

	kc_ready = false;

	return res;
}

/**
 * pfk_kc_load_key_start() - retrieve the key from cache or add it if
 *                           it's not there, and return the ICE hw key index
 *                           in @key_index.
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 * @key_index: the pointer to key_index where the output will be stored
 * @async: whether scm calls are allowed in the caller context
 * @data_unit: dun size
 * @ice_rev: ICE revision
 *
 * If the key is present in the cache, the key_index will be retrieved from
 * the cache. If it is not present, the oldest entry from the kc table will
 * be evicted, the key will be loaded to ICE via QSEE at the index of the
 * evicted entry, and it will be stored in the cache.
 * The entry that is going to be used is marked as in use; it is marked as
 * not in use once ICE finishes using it and pfk_kc_load_key_end() is invoked.
 * As QSEE calls can only be done from a non-atomic context, setting @async
 * to 'false' specifies that it is ok to make the calls in the current
 * context. Otherwise, when @async is set, -EAGAIN is returned and the caller
 * should retry the call from a different context.
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size, u32 *key_index,
		bool async, unsigned int data_unit, int ice_rev)
{
	int ret = 0;
	struct kc_entry *entry = NULL;
	bool entry_exists = false;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key || !salt || !key_index) {
		pr_err("%s key/salt/key_index NULL\n", __func__);
		return -EINVAL;
	}

	if (key_size != PFK_KC_KEY_SIZE) {
		pr_err("unsupported key size %zu\n", key_size);
		return -EINVAL;
	}

	if (salt_size != PFK_KC_SALT_SIZE) {
		pr_err("unsupported salt size %zu\n", salt_size);
		return -EINVAL;
	}

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		if (async) {
			pr_debug("%s task will populate entry\n", __func__);
			kc_spin_unlock();
			return -EAGAIN;
		}

		entry = kc_find_oldest_entry_non_locked();
		if (!entry) {
			/* could not find a single non-locked entry,
			 * return EBUSY to upper layers so that the
			 * request will be rescheduled
			 */
			kc_spin_unlock();
			return -EBUSY;
		}
	} else {
		entry_exists = true;
	}

	pr_debug("entry with index %d is in state %d\n",
		entry->key_index, entry->state);

	switch (entry->state) {
	case (INACTIVE):
		if (entry_exists) {
			kc_update_timestamp(entry);
			entry->state = ACTIVE_ICE_LOADED;

			if (!strcmp(s_type, (char *)PFK_UFS)) {
				if (async)
					entry->loaded_ref_cnt++;
			} else {
				entry->loaded_ref_cnt++;
			}
			break;
		}
		/* fall through: reuse the evicted INACTIVE entry for a new key */
	case (FREE):
		ret = kc_update_entry(entry, key, key_size, salt, salt_size,
					data_unit, ice_rev);
		if (ret) {
			entry->state = SCM_ERROR;
			entry->scm_error = ret;
			pr_err("%s: key load error (%d)\n", __func__, ret);
		} else {
			kc_update_timestamp(entry);
			entry->state = ACTIVE_ICE_LOADED;

			/*
			 * In case of UFS only increase ref cnt for async calls,
			 * sync calls from within the work thread do not pass
			 * requests further to HW
			 */
			if (!strcmp(s_type, (char *)PFK_UFS)) {
				if (async)
					entry->loaded_ref_cnt++;
			} else {
				entry->loaded_ref_cnt++;
			}
		}
		break;
	case (ACTIVE_ICE_PRELOAD):
	case (INACTIVE_INVALIDATING):
		ret = -EAGAIN;
		break;
	case (ACTIVE_ICE_LOADED):
		kc_update_timestamp(entry);

		if (!strcmp(s_type, (char *)PFK_UFS)) {
			if (async)
				entry->loaded_ref_cnt++;
		} else {
			entry->loaded_ref_cnt++;
		}
		break;
	case (SCM_ERROR):
		ret = entry->scm_error;
		kc_clear_entry(entry);
		entry->state = FREE;
		break;
	default:
		pr_err("invalid state %d for entry with key index %d\n",
			entry->state, entry->key_index);
		ret = -EINVAL;
	}

	*key_index = entry->key_index;
	kc_spin_unlock();

	return ret;
}
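
/*
 * Illustrative caller sketch (added for clarity, not taken from the original
 * sources; the real caller is expected to live in pfk.c and may differ).
 * It shows the intended protocol: -EAGAIN from an atomic context means
 * "retry later from a context where SCM calls are allowed", and every
 * successful start is eventually paired with pfk_kc_load_key_end():
 *
 *	u32 key_index;
 *	int ret = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
 *			PFK_KC_SALT_SIZE, &key_index, true, data_unit, ice_rev);
 *	if (ret == -EAGAIN)
 *		<defer the request and retry with async == false>
 *	else if (!ret)
 *		<use key_index for the I/O, then call pfk_kc_load_key_end()>
 */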

/**
 * pfk_kc_load_key_end() - finish the process of key loading that was started
 *                         by pfk_kc_load_key_start, by marking the entry as
 *                         no longer in use
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 */
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	struct kc_entry *entry = NULL;
	struct task_struct *tmp_pending = NULL;
	int ref_cnt = 0;

	if (!kc_is_ready())
		return;

	if (!key || !salt)
		return;

	if (key_size != PFK_KC_KEY_SIZE)
		return;

	if (salt_size != PFK_KC_SALT_SIZE)
		return;

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		kc_spin_unlock();
		pr_err("internal error, there should be an entry to unlock\n");

		return;
	}
	ref_cnt = --entry->loaded_ref_cnt;

	if (ref_cnt < 0)
		pr_err("internal error, ref count should never be negative\n");

	if (!ref_cnt) {
		entry->state = INACTIVE;
		/*
		 * wake up invalidation if it's waiting
		 * for the entry to be released
		 */
		if (entry->thread_pending) {
			tmp_pending = entry->thread_pending;
			entry->thread_pending = NULL;

			kc_spin_unlock();
			wake_up_process(tmp_pending);
			return;
		}
	}

	kc_spin_unlock();
}

/**
 * pfk_kc_remove_key_with_salt() - remove the key from the cache and from the
 *                                 ICE engine
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 *
 * Return 0 in case of success, error otherwise (also in case of a
 * non-existing key)
 */
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	struct kc_entry *entry = NULL;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key)
		return -EINVAL;

	if (!salt)
		return -EINVAL;

	if (key_size != PFK_KC_KEY_SIZE)
		return -EINVAL;

	if (salt_size != PFK_KC_SALT_SIZE)
		return -EINVAL;

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		pr_debug("%s: key does not exist\n", __func__);
		kc_spin_unlock();
		return -EINVAL;
	}

	res = kc_entry_start_invalidating(entry);
	if (res != 0) {
		kc_spin_unlock();
		return res;
	}
	kc_clear_entry(entry);

	kc_spin_unlock();

	qti_pfk_ice_invalidate_key(entry->key_index, s_type);

	kc_spin_lock();
	kc_entry_finish_invalidating(entry);
	kc_spin_unlock();

	return 0;
}

/**
 * pfk_kc_remove_key() - remove the key from the cache and from the ICE engine
 *                       when no salt is available. Only the key part is
 *                       matched; if several entries share the key, all of
 *                       them are removed.
 *
 * @key: pointer to the key
 * @key_size: the size of the key
 *
 * Return 0 in case of success, error otherwise (also for a non-existing key)
 */
int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
{
	struct kc_entry *entry = NULL;
	int index = 0;
	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
	int temp_indexes_size = 0;
	int i = 0;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key)
		return -EINVAL;

	if (key_size != PFK_KC_KEY_SIZE)
		return -EINVAL;

	memset(temp_indexes, -1, sizeof(temp_indexes));

	kc_spin_lock();

	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
	if (!entry) {
		pr_err("%s: key does not exist\n", __func__);
		kc_spin_unlock();
		return -EINVAL;
	}

	res = kc_entry_start_invalidating(entry);
	if (res != 0) {
		kc_spin_unlock();
		return res;
	}

	temp_indexes[temp_indexes_size++] = index;
	kc_clear_entry(entry);

	/* let's clean additional entries with the same key if there are any */
	do {
		index++;
		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
		if (!entry)
			break;

		res = kc_entry_start_invalidating(entry);
		if (res != 0) {
			kc_spin_unlock();
			/* point at the last index collected so far, so that
			 * the out path does not touch an unused (-1) slot
			 */
			temp_indexes_size--;
			goto out;
		}

		temp_indexes[temp_indexes_size++] = index;

		kc_clear_entry(entry);

	} while (true);

	kc_spin_unlock();

	temp_indexes_size--;
	for (i = temp_indexes_size; i >= 0; i--)
		qti_pfk_ice_invalidate_key(
			kc_entry_at_index(temp_indexes[i])->key_index,
			s_type);

	/* fall through */
	res = 0;

out:
	kc_spin_lock();
	for (i = temp_indexes_size; i >= 0; i--)
		kc_entry_finish_invalidating(
				kc_entry_at_index(temp_indexes[i]));
	kc_spin_unlock();

	return res;
}

/**
 * pfk_kc_clear() - clear the table and remove all keys from ICE
 *
 * Return 0 on success, error otherwise
 */
int pfk_kc_clear(void)
{
	struct kc_entry *entry = NULL;
	int i = 0;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		res = kc_entry_start_invalidating(entry);
		if (res != 0) {
			kc_spin_unlock();
			goto out;
		}
		kc_clear_entry(entry);
	}
	kc_spin_unlock();

	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
					s_type);

	/* fall through */
	res = 0;
out:
	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
		kc_entry_finish_invalidating(kc_entry_at_index(i));
	kc_spin_unlock();

	return res;
}

/**
 * pfk_kc_clear_on_reset() - clear the key cache table on reset
 *
 * The assumption is that at this point there are no pending transactions.
 * Also, there is no need to clear the keys from ICE.
 */
void pfk_kc_clear_on_reset(void)
{
	struct kc_entry *entry = NULL;
	int i = 0;

	if (!kc_is_ready())
		return;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		kc_clear_entry(entry);
	}
	kc_spin_unlock();
}

static int pfk_kc_find_storage_type(char **device)
{
	char boot[20] = {'\0'};
	char *match = (char *)strnstr(saved_command_line,
				"androidboot.bootdevice=",
				strlen(saved_command_line));

	if (match) {
		/* copy at most sizeof(boot) - 1 chars and stop at the end of
		 * the command line instead of reading past it
		 */
		strlcpy(boot, match + strlen("androidboot.bootdevice="),
			sizeof(boot));
		if (strnstr(boot, PFK_UFS, strlen(boot)))
			*device = PFK_UFS;

		return 0;
	}
	return -EINVAL;
}
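
/*
 * Example (with a hypothetical device name): a kernel command line containing
 * "androidboot.bootdevice=1d84000.ufshc" matches the "ufs" substring, so
 * s_type is switched from its default "sdcc" to "ufs"; on eMMC targets the
 * value does not contain "ufs" and the default is kept.
 */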

static int __init pfk_kc_pre_init(void)
{
	return pfk_kc_find_storage_type(&s_type);
}

static void __exit pfk_kc_exit(void)
{
	s_type = NULL;
}

module_init(pfk_kc_pre_init);
module_exit(pfk_kc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key-KC driver");