// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
4 */
5
6/*
7 * PFK Key Cache
8 *
9 * Key Cache used internally in PFK.
10 * The purpose of the cache is to save access time to QSEE when loading keys.
11 * Currently the cache is the same size as the total number of keys that can
12 * be loaded to ICE. Since this number is relatively small, the algorithms for
13 * cache eviction are simple, linear and based on last usage timestamp, i.e
14 * the node that will be evicted is the one with the oldest timestamp.
15 * Empty entries always have the oldest timestamp.
16 */
17
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/spinlock.h>
21#include <crypto/ice.h>
22#include <linux/errno.h>
23#include <linux/string.h>
24#include <linux/jiffies.h>
25#include <linux/slab.h>
26#include <linux/printk.h>
27#include <linux/sched/signal.h>
28
29#include "pfk_kc.h"
30#include "pfk_ice.h"
31
32
/** the first available index in ice engine (slots 0/1 are reserved) */
#define PFK_KC_STARTING_INDEX 2

/** currently the only supported key and salt sizes */
#define PFK_KC_KEY_SIZE 32
#define PFK_KC_SALT_SIZE 32

/** Table size: one cache entry per usable ICE key slot */
#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))

/** The maximum key and salt size */
#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
/* storage-type token searched for in the boot command line */
#define PFK_UFS "ufs"

/* protects kc_table and all entry state transitions */
static DEFINE_SPINLOCK(kc_lock);
/*
 * NOTE(review): a single file-scope copy of the saved IRQ flags is shared
 * by every user of kc_lock. This is only safe if lock/unlock pairs never
 * nest and are strictly matched on the same CPU path — confirm no caller
 * nests kc_spin_lock().
 */
static unsigned long flags;
/* set once pfk_kc_init() has assigned ICE indices to all entries */
static bool kc_ready;
/* hardware storage type; defaults to "sdcc", may become "ufs" at boot */
static char *s_type = "sdcc";
52
/**
 * enum pfk_kc_entry_state - state of the entry inside kc table
 *
 * @FREE:			entry is free
 * @ACTIVE_ICE_PRELOAD:		entry is actively used by ICE engine
 *				and cannot be used by others. SCM call
 *				to load key to ICE is pending to be performed
 * @ACTIVE_ICE_LOADED:		entry is actively used by ICE engine and
 *				cannot be used by others. SCM call to load the
 *				key to ICE was successfully executed and key is
 *				now loaded
 * @INACTIVE_INVALIDATING:	entry is being invalidated during file close
 *				and cannot be used by others until invalidation
 *				is complete
 * @INACTIVE:			entry's key is already loaded, but is not
 *				currently being used. It can be re-used for
 *				optimization and to avoid SCM call cost or
 *				it can be taken by another key if there are
 *				no FREE entries
 * @SCM_ERROR:			error occurred while scm call was performed to
 *				load the key to ICE
 */
enum pfk_kc_entry_state {
	FREE,
	ACTIVE_ICE_PRELOAD,
	ACTIVE_ICE_LOADED,
	INACTIVE_INVALIDATING,
	INACTIVE,
	SCM_ERROR
};
83
/* a single key-cache slot, mirroring one ICE hardware key index */
struct kc_entry {
	unsigned char key[PFK_MAX_KEY_SIZE];
	size_t key_size;

	unsigned char salt[PFK_MAX_SALT_SIZE];
	size_t salt_size;

	/* last-use time (jiffies64); 0 for cleared slots, drives LRU pick */
	u64 time_stamp;
	/* fixed ICE hw slot index, assigned once at init, never cleared */
	u32 key_index;

	/* task sleeping in kc_entry_wait_till_available(), if any */
	struct task_struct *thread_pending;

	enum pfk_kc_entry_state state;

	/* ref count for the number of requests in the HW queue for this key */
	int loaded_ref_cnt;
	/* last SCM error code; meaningful while state == SCM_ERROR */
	int scm_error;
};

/* the cache itself: one entry per usable ICE key slot */
static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
104
/**
 * kc_is_ready() - driver is initialized and ready.
 *
 * Return: true once pfk_kc_init() has completed and the cache is usable.
 */
static inline bool kc_is_ready(void)
{
	return kc_ready;
}
114
/*
 * Acquire the cache lock, disabling local interrupts.
 *
 * NOTE(review): the saved IRQ state goes into the single file-scope
 * 'flags' variable shared by all CPUs. Nested or racing lock attempts
 * would overwrite an earlier saved value — verify lock/unlock pairs
 * never nest before relying on this helper from new call sites.
 */
static inline void kc_spin_lock(void)
{
	spin_lock_irqsave(&kc_lock, flags);
}
119
/*
 * Release the cache lock and restore the IRQ state saved by the matching
 * kc_spin_lock() call (held in the shared file-scope 'flags' variable).
 */
static inline void kc_spin_unlock(void)
{
	spin_unlock_irqrestore(&kc_lock, flags);
}
124
/**
 * pfk_kc_get_storage_type() - return the hardware storage type.
 *
 * Return: storage type string ("sdcc" by default, possibly "ufs" as
 * detected from the boot command line at module init).
 */
const char *pfk_kc_get_storage_type(void)
{
	return s_type;
}
134
135/**
136 * kc_entry_is_available() - checks whether the entry is available
137 *
138 * Return true if it is , false otherwise or if invalid
139 * Should be invoked under spinlock
140 */
141static bool kc_entry_is_available(const struct kc_entry *entry)
142{
143 if (!entry)
144 return false;
145
146 return (entry->state == FREE || entry->state == INACTIVE);
147}
148
/**
 * kc_entry_wait_till_available() - waits till entry is available
 * @entry: entry to wait on
 *
 * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
 * by signal
 *
 * Should be invoked under spinlock. The lock is dropped across schedule()
 * and re-acquired before returning, so any state observed by the caller
 * before this call may be stale afterwards.
 */
static int kc_entry_wait_till_available(struct kc_entry *entry)
{
	int res = 0;

	while (!kc_entry_is_available(entry)) {
		/* must be set before the signal/availability re-check so a
		 * wake-up between the check and schedule() is not lost
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			res = -ERESTARTSYS;
			break;
		}
		/* assuming only one thread can try to invalidate
		 * the same entry
		 */
		entry->thread_pending = current;
		kc_spin_unlock();
		schedule();
		kc_spin_lock();
	}
	set_current_state(TASK_RUNNING);

	return res;
}
179
180/**
181 * kc_entry_start_invalidating() - moves entry to state
182 * INACTIVE_INVALIDATING
183 * If entry is in use, waits till
184 * it gets available
185 * @entry: pointer to entry
186 *
187 * Return 0 in case of success, otherwise error
188 * Should be invoked under spinlock
189 */
190static int kc_entry_start_invalidating(struct kc_entry *entry)
191{
192 int res;
193
194 res = kc_entry_wait_till_available(entry);
195 if (res)
196 return res;
197
198 entry->state = INACTIVE_INVALIDATING;
199
200 return 0;
201}
202
203/**
204 * kc_entry_finish_invalidating() - moves entry to state FREE
205 * wakes up all the tasks waiting
206 * on it
207 *
208 * @entry: pointer to entry
209 *
210 * Return 0 in case of success, otherwise error
211 * Should be invoked under spinlock
212 */
213static void kc_entry_finish_invalidating(struct kc_entry *entry)
214{
215 if (!entry)
216 return;
217
218 if (entry->state != INACTIVE_INVALIDATING)
219 return;
220
221 entry->state = FREE;
222}
223
224/**
225 * kc_min_entry() - compare two entries to find one with minimal time
226 * @a: ptr to the first entry. If NULL the other entry will be returned
227 * @b: pointer to the second entry
228 *
229 * Return the entry which timestamp is the minimal, or b if a is NULL
230 */
231static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
232 struct kc_entry *b)
233{
234 if (!a)
235 return b;
236
237 if (time_before64(b->time_stamp, a->time_stamp))
238 return b;
239
240 return a;
241}
242
243/**
244 * kc_entry_at_index() - return entry at specific index
245 * @index: index of entry to be accessed
246 *
247 * Return entry
248 * Should be invoked under spinlock
249 */
250static struct kc_entry *kc_entry_at_index(int index)
251{
252 return &(kc_table[index]);
253}
254
255/**
256 * kc_find_key_at_index() - find kc entry starting at specific index
257 * @key: key to look for
258 * @key_size: the key size
259 * @salt: salt to look for
260 * @salt_size: the salt size
261 * @sarting_index: index to start search with, if entry found, updated with
262 * index of that entry
263 *
264 * Return entry or NULL in case of error
265 * Should be invoked under spinlock
266 */
267static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
268 size_t key_size, const unsigned char *salt, size_t salt_size,
269 int *starting_index)
270{
271 struct kc_entry *entry = NULL;
272 int i = 0;
273
274 for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
275 entry = kc_entry_at_index(i);
276
277 if (salt != NULL) {
278 if (entry->salt_size != salt_size)
279 continue;
280
281 if (memcmp(entry->salt, salt, salt_size) != 0)
282 continue;
283 }
284
285 if (entry->key_size != key_size)
286 continue;
287
288 if (memcmp(entry->key, key, key_size) == 0) {
289 *starting_index = i;
290 return entry;
291 }
292 }
293
294 return NULL;
295}
296
297/**
298 * kc_find_key() - find kc entry
299 * @key: key to look for
300 * @key_size: the key size
301 * @salt: salt to look for
302 * @salt_size: the salt size
303 *
304 * Return entry or NULL in case of error
305 * Should be invoked under spinlock
306 */
307static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
308 const unsigned char *salt, size_t salt_size)
309{
310 int index = 0;
311
312 return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
313}
314
315/**
316 * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
317 * that is not locked
318 *
319 * Returns entry with minimal timestamp. Empty entries have timestamp
320 * of 0, therefore they are returned first.
321 * If all the entries are locked, will return NULL
322 * Should be invoked under spin lock
323 */
324static struct kc_entry *kc_find_oldest_entry_non_locked(void)
325{
326 struct kc_entry *curr_min_entry = NULL;
327 struct kc_entry *entry = NULL;
328 int i = 0;
329
330 for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
331 entry = kc_entry_at_index(i);
332
333 if (entry->state == FREE)
334 return entry;
335
336 if (entry->state == INACTIVE)
337 curr_min_entry = kc_min_entry(curr_min_entry, entry);
338 }
339
340 return curr_min_entry;
341}
342
343/**
344 * kc_update_timestamp() - updates timestamp of entry to current
345 *
346 * @entry: entry to update
347 *
348 */
349static void kc_update_timestamp(struct kc_entry *entry)
350{
351 if (!entry)
352 return;
353
354 entry->time_stamp = get_jiffies_64();
355}
356
357/**
358 * kc_clear_entry() - clear the key from entry and mark entry not in use
359 *
360 * @entry: pointer to entry
361 *
362 * Should be invoked under spinlock
363 */
364static void kc_clear_entry(struct kc_entry *entry)
365{
366 if (!entry)
367 return;
368
369 memset(entry->key, 0, entry->key_size);
370 memset(entry->salt, 0, entry->salt_size);
371
372 entry->key_size = 0;
373 entry->salt_size = 0;
374
375 entry->time_stamp = 0;
376 entry->scm_error = 0;
377
378 entry->state = FREE;
379
380 entry->loaded_ref_cnt = 0;
381 entry->thread_pending = NULL;
382}
383
/**
 * kc_update_entry() - replaces the key in given entry and
 * loads the new key to ICE
 *
 * @entry: entry to replace key in
 * @key: key
 * @key_size: key_size
 * @salt: salt
 * @salt_size: salt_size
 * @data_unit: dun size
 *
 * The previous key is securely released and wiped, the new one is loaded
 * to ICE.
 * Should be invoked under spinlock. The lock is dropped for the duration
 * of the SCM call and re-taken before returning; the entry is parked in
 * ACTIVE_ICE_PRELOAD over that window so no other context can claim it.
 * On return the entry is still in ACTIVE_ICE_PRELOAD — the caller is
 * responsible for moving it to ACTIVE_ICE_LOADED or SCM_ERROR based on
 * the return value.
 * Caller to validate that key/salt_size matches the size in struct kc_entry
 */
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
	size_t key_size, const unsigned char *salt, size_t salt_size,
	unsigned int data_unit)
{
	int ret;

	kc_clear_entry(entry);

	memcpy(entry->key, key, key_size);
	entry->key_size = key_size;

	memcpy(entry->salt, salt, salt_size);
	entry->salt_size = salt_size;

	/* Mark entry as no longer free before releasing the lock */
	entry->state = ACTIVE_ICE_PRELOAD;
	kc_spin_unlock();

	/* SCM call must be made outside atomic context */
	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
			entry->salt, s_type, data_unit);

	kc_spin_lock();
	return ret;
}
424
425/**
426 * pfk_kc_init() - init function
427 *
428 * Return 0 in case of success, error otherwise
429 */
430int pfk_kc_init(void)
431{
432 int i = 0;
433 struct kc_entry *entry = NULL;
434
435 kc_spin_lock();
436 for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
437 entry = kc_entry_at_index(i);
438 entry->key_index = PFK_KC_STARTING_INDEX + i;
439 }
440 kc_ready = true;
441 kc_spin_unlock();
442
443 return 0;
444}
445
/**
 * pfk_kc_deinit() - deinit function
 *
 * Clears the whole cache (invalidating every loaded ICE key) while the
 * driver is still marked ready, then flips kc_ready off.
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_deinit(void)
{
	int res = pfk_kc_clear();

	kc_ready = false;

	return res;
}
459
/**
 * pfk_kc_load_key_start() - retrieve the key from cache or add it if
 * it's not there and return the ICE hw key index in @key_index.
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 * @key_index: the pointer to key_index where the output will be stored
 * @async: whether scm calls are allowed in the caller context
 * @data_unit: dun size
 *
 * If key is present in cache, than the key_index will be retrieved from cache.
 * If it is not present, the oldest entry from kc table will be evicted,
 * the key will be loaded to ICE via QSEE to the index that is the evicted
 * entry number and stored in cache.
 * Entry that is going to be used is marked as being used, it will mark
 * as not being used when ICE finishes using it and pfk_kc_load_key_end
 * will be invoked.
 * As QSEE calls can only be done from a non-atomic context, when @async flag
 * is set to 'false', it specifies that it is ok to make the calls in the
 * current context. Otherwise, when @async is set, the caller should retry the
 * call again from a different context, and -EAGAIN error will be returned.
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size, u32 *key_index,
		bool async, unsigned int data_unit)
{
	int ret = 0;
	struct kc_entry *entry = NULL;
	bool entry_exists = false;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key || !salt || !key_index) {
		pr_err("%s key/salt/key_index NULL\n", __func__);
		return -EINVAL;
	}

	if (key_size != PFK_KC_KEY_SIZE) {
		pr_err("unsupported key size %zu\n", key_size);
		return -EINVAL;
	}

	if (salt_size != PFK_KC_SALT_SIZE) {
		pr_err("unsupported salt size %zu\n", salt_size);
		return -EINVAL;
	}

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		if (async) {
			pr_debug("%s task will populate entry\n", __func__);
			kc_spin_unlock();
			return -EAGAIN;
		}

		entry = kc_find_oldest_entry_non_locked();
		if (!entry) {
			/* could not find a single non locked entry,
			 * return EBUSY to upper layers so that the
			 * request will be rescheduled
			 */
			kc_spin_unlock();
			return -EBUSY;
		}
	} else {
		entry_exists = true;
	}

	pr_debug("entry with index %d is in state %d\n",
		entry->key_index, entry->state);

	switch (entry->state) {
	case (INACTIVE):
		/* cache hit on an idle entry: just reactivate it */
		if (entry_exists) {
			kc_update_timestamp(entry);
			entry->state = ACTIVE_ICE_LOADED;

			/* on UFS, sync calls never reach the HW queue, so
			 * only async users take a reference
			 */
			if (!strcmp(s_type, (char *)PFK_UFS)) {
				if (async)
					entry->loaded_ref_cnt++;
			} else {
				entry->loaded_ref_cnt++;
			}
			break;
		}
		/* fall through - an INACTIVE entry chosen for eviction is
		 * reloaded with the new key exactly like a FREE one
		 */
	case (FREE):
		/* kc_update_entry drops and re-takes the lock for the SCM
		 * call; the entry is parked in ACTIVE_ICE_PRELOAD meanwhile
		 */
		ret = kc_update_entry(entry, key, key_size, salt, salt_size,
					data_unit);
		if (ret) {
			entry->state = SCM_ERROR;
			entry->scm_error = ret;
			pr_err("%s: key load error (%d)\n", __func__, ret);
		} else {
			kc_update_timestamp(entry);
			entry->state = ACTIVE_ICE_LOADED;

			/*
			 * In case of UFS only increase ref cnt for async calls,
			 * sync calls from within work thread do not pass
			 * requests further to HW
			 */
			if (!strcmp(s_type, (char *)PFK_UFS)) {
				if (async)
					entry->loaded_ref_cnt++;
			} else {
				entry->loaded_ref_cnt++;
			}
		}
		break;
	case (ACTIVE_ICE_PRELOAD):
	case (INACTIVE_INVALIDATING):
		/* another context owns the entry; caller must retry */
		ret = -EAGAIN;
		break;
	case (ACTIVE_ICE_LOADED):
		kc_update_timestamp(entry);

		if (!strcmp(s_type, (char *)PFK_UFS)) {
			if (async)
				entry->loaded_ref_cnt++;
		} else {
			entry->loaded_ref_cnt++;
		}
		break;
	case(SCM_ERROR):
		/* report the stored failure once, then recycle the slot */
		ret = entry->scm_error;
		kc_clear_entry(entry);
		entry->state = FREE;
		break;
	default:
		pr_err("invalid state %d for entry with key index %d\n",
			entry->state, entry->key_index);
		ret = -EINVAL;
	}

	*key_index = entry->key_index;
	kc_spin_unlock();

	return ret;
}
604
/**
 * pfk_kc_load_key_end() - finish the process of key loading that was started
 * by pfk_kc_load_key_start
 * by marking the entry as not
 * being in use
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 *
 * Drops one reference on the cached entry; when the count reaches zero
 * the entry goes INACTIVE and any task blocked in invalidation is woken.
 */
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	struct kc_entry *entry = NULL;
	struct task_struct *tmp_pending = NULL;
	int ref_cnt = 0;

	if (!kc_is_ready())
		return;

	if (!key || !salt)
		return;

	if (key_size != PFK_KC_KEY_SIZE)
		return;

	if (salt_size != PFK_KC_SALT_SIZE)
		return;

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		kc_spin_unlock();
		pr_err("internal error, there should an entry to unlock\n");

		return;
	}
	ref_cnt = --entry->loaded_ref_cnt;

	if (ref_cnt < 0)
		pr_err("internal error, ref count should never be negative\n");

	if (!ref_cnt) {
		entry->state = INACTIVE;
		/*
		 * wake-up invalidation if it's waiting
		 * for the entry to be released
		 */
		if (entry->thread_pending) {
			/* capture the waiter before dropping the lock so
			 * wake_up_process() is not called in atomic context
			 * with a pointer another CPU could clear
			 */
			tmp_pending = entry->thread_pending;
			entry->thread_pending = NULL;

			kc_spin_unlock();
			wake_up_process(tmp_pending);
			return;
		}
	}

	kc_spin_unlock();
}
667
/**
 * pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE
 * engine
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 *
 * Return 0 in case of success, error otherwise (also in case of non
 * (existing key)
 */
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	struct kc_entry *entry = NULL;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key)
		return -EINVAL;

	if (!salt)
		return -EINVAL;

	if (key_size != PFK_KC_KEY_SIZE)
		return -EINVAL;

	if (salt_size != PFK_KC_SALT_SIZE)
		return -EINVAL;

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		pr_debug("%s: key does not exist\n", __func__);
		kc_spin_unlock();
		return -EINVAL;
	}

	/* may sleep waiting for the entry; claims it for invalidation */
	res = kc_entry_start_invalidating(entry);
	if (res != 0) {
		kc_spin_unlock();
		return res;
	}
	kc_clear_entry(entry);

	kc_spin_unlock();

	/* safe to touch entry without the lock: it points into the static
	 * kc_table and INACTIVE_INVALIDATING keeps other contexts away;
	 * key_index survives kc_clear_entry()
	 */
	qti_pfk_ice_invalidate_key(entry->key_index, s_type);

	kc_spin_lock();
	kc_entry_finish_invalidating(entry);
	kc_spin_unlock();

	return 0;
}
725
726/**
727 * pfk_kc_remove_key() - remove the key from cache and from ICE engine
728 * when no salt is available. Will only search key part, if there are several,
729 * all will be removed
730 *
731 * @key: pointer to the key
732 * @key_size: the size of the key
733 *
734 * Return 0 in case of success, error otherwise (also for non-existing key)
735 */
736int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
737{
738 struct kc_entry *entry = NULL;
739 int index = 0;
740 int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
741 int temp_indexes_size = 0;
742 int i = 0;
743 int res = 0;
744
745 if (!kc_is_ready())
746 return -ENODEV;
747
748 if (!key)
749 return -EINVAL;
750
751 if (key_size != PFK_KC_KEY_SIZE)
752 return -EINVAL;
753
754 memset(temp_indexes, -1, sizeof(temp_indexes));
755
756 kc_spin_lock();
757
758 entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
759 if (!entry) {
760 pr_err("%s: key does not exist\n", __func__);
761 kc_spin_unlock();
762 return -EINVAL;
763 }
764
765 res = kc_entry_start_invalidating(entry);
766 if (res != 0) {
767 kc_spin_unlock();
768 return res;
769 }
770
771 temp_indexes[temp_indexes_size++] = index;
772 kc_clear_entry(entry);
773
774 /* let's clean additional entries with the same key if there are any */
775 do {
776 index++;
777 entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
778 if (!entry)
779 break;
780
781 res = kc_entry_start_invalidating(entry);
782 if (res != 0) {
783 kc_spin_unlock();
784 goto out;
785 }
786
787 temp_indexes[temp_indexes_size++] = index;
788
789 kc_clear_entry(entry);
790
791
792 } while (true);
793
794 kc_spin_unlock();
795
796 temp_indexes_size--;
797 for (i = temp_indexes_size; i >= 0 ; i--)
798 qti_pfk_ice_invalidate_key(
799 kc_entry_at_index(temp_indexes[i])->key_index,
800 s_type);
801
802 /* fall through */
803 res = 0;
804
805out:
806 kc_spin_lock();
807 for (i = temp_indexes_size; i >= 0 ; i--)
808 kc_entry_finish_invalidating(
809 kc_entry_at_index(temp_indexes[i]));
810 kc_spin_unlock();
811
812 return res;
813}
814
/**
 * pfk_kc_clear() - clear the table and remove all keys from ICE
 *
 * Every entry is claimed for invalidation and wiped under the lock, then
 * each fixed ICE slot is invalidated via QSEE with the lock dropped, and
 * finally all entries are released back to FREE.
 *
 * Return 0 on success, error otherwise
 *
 */
int pfk_kc_clear(void)
{
	struct kc_entry *entry = NULL;
	int i = 0;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		/* may sleep waiting for a busy entry */
		res = kc_entry_start_invalidating(entry);
		if (res != 0) {
			kc_spin_unlock();
			goto out;
		}
		kc_clear_entry(entry);
	}
	kc_spin_unlock();

	/* key_index survives kc_clear_entry(), so slots can still be
	 * addressed here without the lock
	 */
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
					s_type);

	/* fall through */
	res = 0;
out:
	kc_spin_lock();
	/* safe on the error path too: entries never claimed are skipped by
	 * the INACTIVE_INVALIDATING check inside finish_invalidating
	 */
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
		kc_entry_finish_invalidating(kc_entry_at_index(i));
	kc_spin_unlock();

	return res;
}
856
857/**
858 * pfk_kc_clear_on_reset() - clear the table and remove all keys from ICE
859 * The assumption is that at this point we don't have any pending transactions
860 * Also, there is no need to clear keys from ICE
861 *
862 * Return 0 on success, error otherwise
863 *
864 */
865void pfk_kc_clear_on_reset(void)
866{
867 struct kc_entry *entry = NULL;
868 int i = 0;
869
870 if (!kc_is_ready())
871 return;
872
873 kc_spin_lock();
874 for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
875 entry = kc_entry_at_index(i);
876 kc_clear_entry(entry);
877 }
878 kc_spin_unlock();
879}
880
881static int pfk_kc_find_storage_type(char **device)
882{
883 char boot[20] = {'\0'};
884 char *match = (char *)strnstr(saved_command_line,
885 "androidboot.bootdevice=",
886 strlen(saved_command_line));
887 if (match) {
888 memcpy(boot, (match + strlen("androidboot.bootdevice=")),
889 sizeof(boot) - 1);
890 if (strnstr(boot, PFK_UFS, strlen(boot)))
891 *device = PFK_UFS;
892
893 return 0;
894 }
895 return -EINVAL;
896}
897
/*
 * Early module init: detect the storage type from the boot command line.
 * A missing "androidboot.bootdevice=" parameter fails module load.
 */
static int __init pfk_kc_pre_init(void)
{
	return pfk_kc_find_storage_type(&s_type);
}
902
/* Module teardown: drop the cached storage-type string pointer. */
static void __exit pfk_kc_exit(void)
{
	s_type = NULL;
}
907
/* module entry/exit registration and metadata */
module_init(pfk_kc_pre_init);
module_exit(pfk_kc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key-KC driver");