blob: ba6825e9e03a41c01923c474bf5744394d8bd246 [file] [log] [blame]
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/io.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/of.h>
19#include <linux/device-mapper.h>
20#include <linux/clk.h>
21#include <linux/cdev.h>
22#include <linux/regulator/consumer.h>
23#include <linux/msm-bus.h>
24#include <linux/pfk.h>
25#include <crypto/ice.h>
26#include <soc/qcom/scm.h>
27#include <soc/qcom/qseecomi.h>
28#include "iceregs.h"
29
/* Build a TrustZone SMC id from owner / service / function fields. */
#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))

#define TZ_OWNER_QSEE_OS                 50
#define TZ_SVC_KEYSTORE                  5     /* Keystore management */

#define TZ_OS_KS_RESTORE_KEY_ID \
	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)

#define TZ_SYSCALL_CREATE_PARAM_ID_0 0

#define TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID \
	TZ_SYSCALL_CREATE_PARAM_ID_0

/*
 * NOTE(review): this uses the same function id (0x06) as
 * TZ_OS_KS_RESTORE_KEY_ID above — confirm against the TZ keystore
 * interface definition that the two commands really share an id.
 */
#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID \
	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)

#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID \
	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)


/* Extract a revision field (MAJOR/MINOR/STEP) from an ICE version word. */
#define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
#define QCOM_UFS_ICE_DEV	"iceufs"
#define QCOM_SDCC_ICE_DEV	"icesdcc"
#define QCOM_ICE_TYPE_NAME_LEN 8
#define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
#define QCOM_ICE_UFS		10
#define QCOM_ICE_SDCC	20
58
/* Per-clock bookkeeping for the ICE core clocks parsed from DT. */
struct ice_clk_info {
	struct list_head list;	/* link in ice_device.clk_list_head */
	struct clk *clk;	/* handle from devm_clk_get() */
	const char *name;	/* DT "clock-names" entry (kstrdup'd) */
	u32 max_freq;		/* from "qcom,op-freq-hz"; 0 = leave rate alone */
	u32 min_freq;		/* not referenced in this file view */
	u32 curr_freq;		/* last rate programmed via clk_set_rate() */
	bool enabled;		/* not referenced in this file view */
};
68
/* MSM bus-scaling state for one ICE instance. */
struct qcom_ice_bus_vote {
	uint32_t client_handle;		/* from msm_bus_scale_register_client() */
	uint32_t curr_vote;		/* last vote index applied */
	int min_bw_vote;		/* index of the "MIN" bus vector */
	int max_bw_vote;		/* index of the "MAX" bus vector */
	int saved_vote;
	bool is_max_bw_needed;		/* forces "MAX" in qcom_ice_get_bus_vote() */
	struct device_attribute max_bus_bw;
};
78
/* All probed ICE instances, linked through ice_device.list. */
static LIST_HEAD(ice_devices);
/*
 * ICE HW device structure.
 */
struct ice_device {
	struct list_head list;		/* link in the global ice_devices list */
	struct device *pdev;		/* despite the name: the struct device (&pdev->dev) */
	struct cdev cdev;		/* userspace probe node (see register_ice_device) */
	dev_t device_no;
	struct class *driver_class;
	void __iomem *mmio;		/* devm-managed ICE register mapping */
	struct resource *res;		/* MMIO resource; also used for scm_io_* offsets */
	int irq;
	bool is_ice_enabled;
	bool is_ice_disable_fuse_blown;
	ice_error_cb error_cb;		/* host controller's error callback */
	void *host_controller_data; /* UFS/EMMC/other? */
	struct list_head clk_list_head;	/* list of ice_clk_info */
	u32 ice_hw_version;		/* raw QCOM_ICE_REGS_VERSION value */
	bool is_ice_clk_available;	/* DT "qcom,enable-ice-clk" */
	char ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];	/* "ufs" or "sdcc" */
	struct regulator *reg;		/* vdd-hba supply, lazily acquired */
	bool is_regulator_available;
	struct qcom_ice_bus_vote bus_vote;
	ktime_t ice_reset_start_time;
	ktime_t ice_reset_complete_time;
};
106
107static int qti_ice_setting_config(struct request *req,
108 struct platform_device *pdev,
109 struct ice_crypto_setting *crypto_data,
110 struct ice_data_setting *setting)
111{
112 struct ice_device *ice_dev = NULL;
113
114 ice_dev = platform_get_drvdata(pdev);
115
116 if (!ice_dev) {
117 pr_debug("%s no ICE device\n", __func__);
118
119 /* make the caller finish peacfully */
120 return 0;
121 }
122
123 if (ice_dev->is_ice_disable_fuse_blown) {
124 pr_err("%s ICE disabled fuse is blown\n", __func__);
125 return -EPERM;
126 }
127
128 if ((short)(crypto_data->key_index) >= 0) {
129
130 memcpy(&setting->crypto_data, crypto_data,
131 sizeof(setting->crypto_data));
132
133 if (rq_data_dir(req) == WRITE)
134 setting->encr_bypass = false;
135 else if (rq_data_dir(req) == READ)
136 setting->decr_bypass = false;
137 else {
138 /* Should I say BUG_ON */
139 setting->encr_bypass = true;
140 setting->decr_bypass = true;
141 }
142 }
143
144 return 0;
145}
146
147static int qcom_ice_enable_clocks(struct ice_device *, bool);
148
149#ifdef CONFIG_MSM_BUS_SCALING
150
151static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
152{
153 int err = 0;
154
155 if (vote != ice_dev->bus_vote.curr_vote) {
156 err = msm_bus_scale_client_update_request(
157 ice_dev->bus_vote.client_handle, vote);
158 if (err) {
159 dev_err(ice_dev->pdev,
160 "%s:failed:client_handle=0x%x, vote=%d, err=%d\n",
161 __func__, ice_dev->bus_vote.client_handle,
162 vote, err);
163 goto out;
164 }
165 ice_dev->bus_vote.curr_vote = vote;
166 }
167out:
168 return err;
169}
170
171static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
172 const char *speed_mode)
173{
174 struct device *dev = ice_dev->pdev;
175 struct device_node *np = dev->of_node;
176 int err;
177 const char *key = "qcom,bus-vector-names";
178
179 if (!speed_mode) {
180 err = -EINVAL;
181 goto out;
182 }
183
184 if (ice_dev->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
185 err = of_property_match_string(np, key, "MAX");
186 else
187 err = of_property_match_string(np, key, speed_mode);
188out:
189 if (err < 0)
190 dev_err(dev, "%s: Invalid %s mode %d\n",
191 __func__, speed_mode, err);
192 return err;
193}
194
195static int qcom_ice_bus_register(struct ice_device *ice_dev)
196{
197 int err = 0;
198 struct msm_bus_scale_pdata *bus_pdata;
199 struct device *dev = ice_dev->pdev;
200 struct platform_device *pdev = to_platform_device(dev);
201 struct device_node *np = dev->of_node;
202
203 bus_pdata = msm_bus_cl_get_pdata(pdev);
204 if (!bus_pdata) {
205 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
206 err = -ENODATA;
207 goto out;
208 }
209
210 err = of_property_count_strings(np, "qcom,bus-vector-names");
211 if (err < 0 || err != bus_pdata->num_usecases) {
212 dev_err(dev, "%s: Error = %d with qcom,bus-vector-names\n",
213 __func__, err);
214 goto out;
215 }
216 err = 0;
217
218 ice_dev->bus_vote.client_handle =
219 msm_bus_scale_register_client(bus_pdata);
220 if (!ice_dev->bus_vote.client_handle) {
221 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
222 __func__);
223 err = -EFAULT;
224 goto out;
225 }
226
227 /* cache the vote index for minimum and maximum bandwidth */
228 ice_dev->bus_vote.min_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
229 ice_dev->bus_vote.max_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
230out:
231 return err;
232}
233
234#else
235
/* Bus scaling compiled out: voting is a no-op that always succeeds. */
static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
{
	return 0;
}
240
/* Bus scaling compiled out: report vote index 0 unconditionally. */
static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
		const char *speed_mode)
{
	return 0;
}
246
/* Bus scaling compiled out: nothing to register. */
static int qcom_ice_bus_register(struct ice_device *ice_dev)
{
	return 0;
}
251#endif /* CONFIG_MSM_BUS_SCALING */
252
253static int qcom_ice_get_vreg(struct ice_device *ice_dev)
254{
255 int ret = 0;
256
257 if (!ice_dev->is_regulator_available)
258 return 0;
259
260 if (ice_dev->reg)
261 return 0;
262
263 ice_dev->reg = devm_regulator_get(ice_dev->pdev, "vdd-hba");
264 if (IS_ERR(ice_dev->reg)) {
265 ret = PTR_ERR(ice_dev->reg);
266 dev_err(ice_dev->pdev, "%s: %s get failed, err=%d\n",
267 __func__, "vdd-hba-supply", ret);
268 }
269 return ret;
270}
271
/*
 * qcom_ice_config_proc_ignore() - ICE v2.0.0-only workaround: set bit 11
 * (0x800) in ADVANCED_CONTROL. No-op on every other core revision.
 */
static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
{
	u32 regval;

	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
		ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
		ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
		regval = qcom_ice_readl(ice_dev,
				QCOM_ICE_REGS_ADVANCED_CONTROL);
		regval |= 0x800;
		qcom_ice_writel(ice_dev, regval,
				QCOM_ICE_REGS_ADVANCED_CONTROL);
		/* Ensure register is updated */
		mb();
	}
}
288
/*
 * qcom_ice_low_power_mode_enable() - turn on the ICE low-power feature
 * by setting bits 12-14 (0x7000) in ADVANCED_CONTROL.
 */
static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
{
	u32 regval;

	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
	/*
	 * Enable low power mode sequence
	 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
	 */
	regval |= 0x7000;
	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();
}
306
/*
 * qcom_ice_enable_test_bus_config() - enable the ICE test bus so the
 * interrupt lines are observable. Applies to ICE v1.x only; v2+ returns
 * immediately.
 */
static void qcom_ice_enable_test_bus_config(struct ice_device *ice_dev)
{
	/*
	 * Configure & enable ICE_TEST_BUS_REG to reflect ICE intr lines
	 * MAIN_TEST_BUS_SELECTOR = 0 (ICE_CONFIG)
	 * TEST_BUS_REG_EN = 1 (ENABLE)
	 */
	u32 regval;

	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		return;

	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_CONTROL);
	regval &= 0x0FFFFFFF;
	/* TBD: replace 0x2 with define in iceregs.h */
	regval |= 0x2;
	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_TEST_BUS_CONTROL);

	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();
}
331
/*
 * qcom_ice_optimization_enable() - program the revision-specific
 * optimization bits in ADVANCED_CONTROL and, on v1, the endian-swap
 * register. The udelay()s space the writes as the ICE HPG requires.
 */
static void qcom_ice_optimization_enable(struct ice_device *ice_dev)
{
	u32 regval;

	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		regval |= 0xD807100;
	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
		regval |= 0x3F007100;

	/* ICE Optimizations Enable Sequence */
	udelay(5);
	/* [0]-0, [1]-0, [2]-8, [3]-E, [4]-0, [5]-0, [6]-F, [7]-A */
	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();

	/* ICE HPG requires sleep before writing */
	udelay(5);
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
		regval = 0;
		regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP);
		regval |= 0xF;
		qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ENDIAN_SWAP);
		/*
		 * Ensure previous instructions were completed before issue
		 * next ICE commands
		 */
		mb();
	}
}
366
367static int qcom_ice_wait_bist_status(struct ice_device *ice_dev)
368{
369 int count;
370 u32 reg;
371
372 /* Poll until all BIST bits are reset */
373 for (count = 0; count < QCOM_ICE_MAX_BIST_CHECK_COUNT; count++) {
374 reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS);
375 if (!(reg & ICE_BIST_STATUS_MASK))
376 break;
377 udelay(50);
378 }
379
380 if (reg)
381 return -ETIMEDOUT;
382
383 return 0;
384}
385
/*
 * qcom_ice_enable() - take ICE out of global bypass so it processes I/O.
 *
 * On v2.1+ waits for the BIST to finish first. On v1/v2 clears the
 * controller-reset ignore and global-bypass bits; from v3 onwards the
 * enabling is done by the storage controller, so this returns early.
 * On v2.1+ it finally sanity-checks the bypass status register.
 *
 * Returns 0 on success or a negative error from the BIST wait.
 */
static int qcom_ice_enable(struct ice_device *ice_dev)
{
	unsigned int reg;
	int ret = 0;

	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1)))
		ret = qcom_ice_wait_bist_status(ice_dev);
	if (ret) {
		dev_err(ice_dev->pdev, "BIST status error (%d)\n", ret);
		return ret;
	}

	/* Starting ICE v3 enabling is done at storage controller (UFS/SDCC) */
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 3)
		return 0;

	/*
	 * To enable ICE, perform following
	 * 1. Set IGNORE_CONTROLLER_RESET to USE in ICE_RESET register
	 * 2. Disable GLOBAL_BYPASS bit in ICE_CONTROL register
	 */
	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET);

	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		reg &= 0x0;
	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
		reg &= ~0x100;

	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_RESET);

	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();

	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL);

	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		reg &= 0xFFFE;
	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
		reg &= ~0x7;
	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_CONTROL);

	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();

	/* Bit 31 of BYPASS_STATUS still set means bypass did not disengage */
	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
		reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
		if ((reg & 0x80000000) != 0x0) {
			pr_err("%s: Bypass failed for ice = %p",
				__func__, (void *)ice_dev);
			WARN_ON(1);
		}
	}
	return 0;
}
450
451static int qcom_ice_verify_ice(struct ice_device *ice_dev)
452{
453 unsigned int rev;
454 unsigned int maj_rev, min_rev, step_rev;
455
456 rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
457 maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
458 min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
459 step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
460
461 if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
462 pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n",
463 __func__, (unsigned long)ice_dev->mmio,
464 maj_rev, min_rev, step_rev);
465 return -ENODEV;
466 }
467 ice_dev->ice_hw_version = rev;
468
469 dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
470 maj_rev, min_rev, step_rev,
471 ice_dev->mmio);
472
473 return 0;
474}
475
/* Unmask all ICE non-secure interrupt sources. */
static void qcom_ice_enable_intr(struct ice_device *ice_dev)
{
	unsigned int reg;

	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
	reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();
}
489
/* Mask all ICE non-secure interrupt sources (used on driver remove). */
static void qcom_ice_disable_intr(struct ice_device *ice_dev)
{
	unsigned int reg;

	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
	reg |= QCOM_ICE_NON_SEC_IRQ_MASK;
	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();
}
503
/*
 * qcom_ice_isr() - non-secure ICE interrupt handler.
 *
 * Forwards the raw status word to the host controller's error callback
 * and clears the interrupt. Returns IRQ_HANDLED when any status bit was
 * set, IRQ_NONE otherwise.
 *
 * NOTE(review): assumes error_cb was registered (qcom_ice_init) before
 * the interrupt can fire — confirm no IRQ arrives earlier.
 */
static irqreturn_t qcom_ice_isr(int isr, void *data)
{
	irqreturn_t retval = IRQ_NONE;
	u32 status;
	struct ice_device *ice_dev = data;

	status = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS);
	if (status) {
		ice_dev->error_cb(ice_dev->host_controller_data, status);

		/* Interrupt has been handled. Clear the IRQ */
		qcom_ice_writel(ice_dev, status, QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
		/* Ensure instruction is completed */
		mb();
		retval = IRQ_HANDLED;
	}
	return retval;
}
522
523static void qcom_ice_parse_ice_instance_type(struct platform_device *pdev,
524 struct ice_device *ice_dev)
525{
526 int ret = -1;
527 struct device *dev = &pdev->dev;
528 struct device_node *np = dev->of_node;
529 const char *type;
530
531 ret = of_property_read_string_index(np, "qcom,instance-type", 0, &type);
532 if (ret) {
533 pr_err("%s: Could not get ICE instance type\n", __func__);
534 goto out;
535 }
536 strlcpy(ice_dev->ice_instance_type, type, QCOM_ICE_TYPE_NAME_LEN);
537out:
538 return;
539}
540
541static int qcom_ice_parse_clock_info(struct platform_device *pdev,
542 struct ice_device *ice_dev)
543{
544 int ret = -1, cnt, i, len;
545 struct device *dev = &pdev->dev;
546 struct device_node *np = dev->of_node;
547 char *name;
548 struct ice_clk_info *clki;
549 u32 *clkfreq = NULL;
550
551 if (!np)
552 goto out;
553
554 cnt = of_property_count_strings(np, "clock-names");
555 if (cnt <= 0) {
556 dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
557 __func__);
558 ret = cnt;
559 goto out;
560 }
561
562 if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
563 dev_info(dev, "qcom,op-freq-hz property not specified\n");
564 goto out;
565 }
566
567 len = len/sizeof(*clkfreq);
568 if (len != cnt)
569 goto out;
570
571 clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
572 if (!clkfreq) {
573 ret = -ENOMEM;
574 goto out;
575 }
576 ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
577
578 INIT_LIST_HEAD(&ice_dev->clk_list_head);
579
580 for (i = 0; i < cnt; i++) {
581 ret = of_property_read_string_index(np,
582 "clock-names", i, (const char **)&name);
583 if (ret)
584 goto out;
585
586 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
587 if (!clki) {
588 ret = -ENOMEM;
589 goto out;
590 }
591 clki->max_freq = clkfreq[i];
592 clki->name = kstrdup(name, GFP_KERNEL);
593 list_add_tail(&clki->list, &ice_dev->clk_list_head);
594 }
595out:
596 if (clkfreq)
597 devm_kfree(dev, (void *)clkfreq);
598 return ret;
599}
600
/*
 * qcom_ice_get_device_tree_data() - gather MMIO, regulator, clock and
 * IRQ resources for the ICE instance from the device tree.
 *
 * Returns 0 on success or a negative error code.
 */
static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
		struct ice_device *ice_dev)
{
	struct device *dev = &pdev->dev;
	int rc = -1;
	int irq;

	ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!ice_dev->res) {
		pr_err("%s: No memory available for IORESOURCE\n", __func__);
		return -ENOMEM;
	}

	ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
	if (IS_ERR(ice_dev->mmio)) {
		rc = PTR_ERR(ice_dev->mmio);
		pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
		goto out;
	}

	/* A missing vdd-hba phandle is treated as "no regulator needed" */
	if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
		pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
			__func__);
		ice_dev->is_regulator_available = false;
	} else {
		ice_dev->is_regulator_available = true;
	}
	ice_dev->is_ice_clk_available = of_property_read_bool(
						(&pdev->dev)->of_node,
						"qcom,enable-ice-clk");

	if (ice_dev->is_ice_clk_available) {
		rc = qcom_ice_parse_clock_info(pdev, ice_dev);
		if (rc) {
			pr_err("%s: qcom_ice_parse_clock_info failed (%d)\n",
				__func__, rc);
			goto err_dev;
		}
	}

	/* ICE interrupts is only relevant for v2.x */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0) {
		rc = devm_request_irq(dev, irq, qcom_ice_isr, 0, dev_name(dev),
				ice_dev);
		if (rc) {
			pr_err("%s: devm_request_irq irq=%d failed (%d)\n",
				__func__, irq, rc);
			goto err_dev;
		}
		ice_dev->irq = irq;
		pr_info("ICE IRQ = %d\n", ice_dev->irq);
	} else {
		/* IRQ is optional: continue without error-interrupt support */
		dev_dbg(dev, "IRQ resource not available\n");
	}

	qcom_ice_parse_ice_instance_type(pdev, ice_dev);

	return 0;
err_dev:
	/*
	 * NOTE(review): devm_iounmap of a devm_ioremap_resource() mapping
	 * is redundant — devres releases it when probe fails. Harmless,
	 * but confirm before replicating the pattern.
	 */
	if (rc && ice_dev->mmio)
		devm_iounmap(dev, ice_dev->mmio);
out:
	return rc;
}
666
667/*
668 * ICE HW instance can exist in UFS or eMMC based storage HW
669 * Userspace does not know what kind of ICE it is dealing with.
670 * Though userspace can find which storage device it is booting
671 * from but all kind of storage types dont support ICE from
672 * beginning. So ICE device is created for user space to ping
673 * if ICE exist for that kind of storage
674 */
/* No read/write/ioctl: the mere existence of the device node is the API. */
static const struct file_operations qcom_ice_fops = {
	.owner = THIS_MODULE,
};
678
679static int register_ice_device(struct ice_device *ice_dev)
680{
681 int rc = 0;
682 unsigned int baseminor = 0;
683 unsigned int count = 1;
684 struct device *class_dev;
685 int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
686
687 rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
688 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
689 if (rc < 0) {
690 pr_err("alloc_chrdev_region failed %d for %s\n", rc,
691 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
692 return rc;
693 }
694 ice_dev->driver_class = class_create(THIS_MODULE,
695 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
696 if (IS_ERR(ice_dev->driver_class)) {
697 rc = -ENOMEM;
698 pr_err("class_create failed %d for %s\n", rc,
699 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
700 goto exit_unreg_chrdev_region;
701 }
702 class_dev = device_create(ice_dev->driver_class, NULL,
703 ice_dev->device_no, NULL,
704 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
705
706 if (!class_dev) {
707 pr_err("class_device_create failed %d for %s\n", rc,
708 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
709 rc = -ENOMEM;
710 goto exit_destroy_class;
711 }
712
713 cdev_init(&ice_dev->cdev, &qcom_ice_fops);
714 ice_dev->cdev.owner = THIS_MODULE;
715
716 rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
717 if (rc < 0) {
718 pr_err("cdev_add failed %d for %s\n", rc,
719 is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
720 goto exit_destroy_device;
721 }
722 return 0;
723
724exit_destroy_device:
725 device_destroy(ice_dev->driver_class, ice_dev->device_no);
726
727exit_destroy_class:
728 class_destroy(ice_dev->driver_class);
729
730exit_unreg_chrdev_region:
731 unregister_chrdev_region(ice_dev->device_no, 1);
732 return rc;
733}
734
735static int qcom_ice_probe(struct platform_device *pdev)
736{
737 struct ice_device *ice_dev;
738 int rc = 0;
739
740 if (!pdev) {
741 pr_err("%s: Invalid platform_device passed\n",
742 __func__);
743 return -EINVAL;
744 }
745
746 ice_dev = kzalloc(sizeof(struct ice_device), GFP_KERNEL);
747
748 if (!ice_dev) {
749 rc = -ENOMEM;
750 pr_err("%s: Error %d allocating memory for ICE device:\n",
751 __func__, rc);
752 goto out;
753 }
754
755 ice_dev->pdev = &pdev->dev;
756 if (!ice_dev->pdev) {
757 rc = -EINVAL;
758 pr_err("%s: Invalid device passed in platform_device\n",
759 __func__);
760 goto err_ice_dev;
761 }
762
763 if (pdev->dev.of_node)
764 rc = qcom_ice_get_device_tree_data(pdev, ice_dev);
765 else {
766 rc = -EINVAL;
767 pr_err("%s: ICE device node not found\n", __func__);
768 }
769
770 if (rc)
771 goto err_ice_dev;
772
773 pr_debug("%s: Registering ICE device\n", __func__);
774 rc = register_ice_device(ice_dev);
775 if (rc) {
776 pr_err("create character device failed.\n");
777 goto err_ice_dev;
778 }
779
780 /*
781 * If ICE is enabled here, it would be waste of power.
782 * We would enable ICE when first request for crypto
783 * operation arrives.
784 */
785 ice_dev->is_ice_enabled = false;
786
787 platform_set_drvdata(pdev, ice_dev);
788 list_add_tail(&ice_dev->list, &ice_devices);
789
790 goto out;
791
792err_ice_dev:
793 kfree(ice_dev);
794out:
795 return rc;
796}
797
798static int qcom_ice_remove(struct platform_device *pdev)
799{
800 struct ice_device *ice_dev;
801
802 ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
803
804 if (!ice_dev)
805 return 0;
806
807 qcom_ice_disable_intr(ice_dev);
808
809 device_init_wakeup(&pdev->dev, false);
810 if (ice_dev->mmio)
811 iounmap(ice_dev->mmio);
812
813 list_del_init(&ice_dev->list);
814 kfree(ice_dev);
815
816 return 1;
817}
818
/* Nothing to quiesce on suspend; state is re-established in resume. */
static int qcom_ice_suspend(struct platform_device *pdev)
{
	return 0;
}
823
/*
 * qcom_ice_restore_config() - ask TZ to restore the ICE keys after a
 * reset (used on the ICE v1 power-collapse path).
 *
 * Returns the scm_call2() result; 0 on success.
 */
static int qcom_ice_restore_config(void)
{
	struct scm_desc desc = {0};
	int ret;

	/*
	 * TZ would check KEYS_RAM_RESET_COMPLETED status bit before processing
	 * restore config command. This would prevent two calls from HLOS to TZ
	 * One to check KEYS_RAM_RESET_COMPLETED status bit second to restore
	 * config
	 */

	desc.arginfo = TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID;

	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_ID, &desc);

	if (ret)
		pr_err("%s: Error: 0x%x\n", __func__, ret);

	return ret;
}
845
846static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
847{
848 struct scm_desc desc = {0};
849 int ret = -1;
850
851 /* For ice 3, key configuration needs to be restored in case of reset */
852
853 desc.arginfo = TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID;
854
855 if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
856 desc.args[0] = QCOM_ICE_SDCC;
857
858 if (!strcmp(ice_dev->ice_instance_type, "ufs"))
859 desc.args[0] = QCOM_ICE_UFS;
860
861 ret = scm_call2(TZ_OS_KS_RESTORE_KEY_CONFIG_ID, &desc);
862
863 if (ret)
864 pr_err("%s: Error: 0x%x\n", __func__, ret);
865
866 return ret;
867}
868
869static int qcom_ice_init_clocks(struct ice_device *ice)
870{
871 int ret = -EINVAL;
872 struct ice_clk_info *clki;
873 struct device *dev = ice->pdev;
874 struct list_head *head = &ice->clk_list_head;
875
876 if (!head || list_empty(head)) {
877 dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
878 goto out;
879 }
880
881 list_for_each_entry(clki, head, list) {
882 if (!clki->name)
883 continue;
884
885 clki->clk = devm_clk_get(dev, clki->name);
886 if (IS_ERR(clki->clk)) {
887 ret = PTR_ERR(clki->clk);
888 dev_err(dev, "%s: %s clk get failed, %d\n",
889 __func__, clki->name, ret);
890 goto out;
891 }
892
893 /* Not all clocks would have a rate to be set */
894 ret = 0;
895 if (clki->max_freq) {
896 ret = clk_set_rate(clki->clk, clki->max_freq);
897 if (ret) {
898 dev_err(dev,
899 "%s: %s clk set rate(%dHz) failed, %d\n",
900 __func__, clki->name,
901 clki->max_freq, ret);
902 goto out;
903 }
904 clki->curr_freq = clki->max_freq;
905 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
906 clki->name, clk_get_rate(clki->clk));
907 }
908 }
909out:
910 return ret;
911}
912
/*
 * qcom_ice_enable_clocks() - prepare+enable (or disable+unprepare) every
 * clock on the ICE clock list.
 *
 * NOTE(review): if enabling fails part-way through the list, clocks that
 * were already enabled in this loop are not rolled back — confirm the
 * callers tolerate that.
 */
static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
{
	int ret = 0;
	struct ice_clk_info *clki;
	struct device *dev = ice->pdev;
	struct list_head *head = &ice->clk_list_head;

	if (!head || list_empty(head)) {
		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (!ice->is_ice_clk_available) {
		dev_err(dev, "%s:ICE Clock not available\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		/*
		 * clk_disable_unprepare() returns void, so ret can only ever
		 * reflect a failure on the enable path.
		 */
		if (enable)
			ret = clk_prepare_enable(clki->clk);
		else
			clk_disable_unprepare(clki->clk);

		if (ret) {
			dev_err(dev, "Unable to %s ICE core clk\n",
				enable?"enable":"disable");
			goto out;
		}
	}
out:
	return ret;
}
950
951static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
952{
953 /* We need to enable source for ICE secure interrupts */
954 int ret = 0;
955 u32 regval;
956
957 regval = scm_io_read((unsigned long)ice_dev->res +
958 QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
959
960 regval &= ~QCOM_ICE_SEC_IRQ_MASK;
961 ret = scm_io_write((unsigned long)ice_dev->res +
962 QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
963
964 /*
965 * Ensure previous instructions was completed before issuing next
966 * ICE initialization/optimization instruction
967 */
968 mb();
969
970 if (!ret)
971 pr_err("%s: failed(0x%x) to init secure ICE config\n",
972 __func__, ret);
973 return ret;
974}
975
/*
 * qcom_ice_update_sec_cfg() - ask TZ to restore the ICE security
 * configuration, which may have been wiped by a host power collapse.
 * Skipped entirely when the ICE clock is driver-controlled.
 *
 * Returns 0 on success or the first non-zero of ret/scm_ret.
 */
static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
{
	int ret = 0, scm_ret = 0;

	/* scm command buffer structure */
	struct qcom_scm_cmd_buf {
		unsigned int device_id;
		unsigned int spare;
	} cbuf = {0};

	/*
	 * Ideally, we should check ICE version to decide whether to proceed or
	 * or not. Since version wont be available when this function is called
	 * we need to depend upon is_ice_clk_available to decide
	 */
	if (ice_dev->is_ice_clk_available)
		goto out;

	/*
	 * Store dev_id in ice_device structure so that emmc/ufs cases can be
	 * handled properly
	 */
	#define RESTORE_SEC_CFG_CMD	0x2
	#define ICE_TZ_DEV_ID	20

	cbuf.device_id = ICE_TZ_DEV_ID;
	ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
	if (ret || scm_ret) {
		pr_err("%s: failed, ret %d scm_ret %d\n",
			__func__, ret, scm_ret);
		if (!ret)
			ret = scm_ret;
	}
out:

	return ret;
}
1013
1014static int qcom_ice_finish_init(struct ice_device *ice_dev)
1015{
1016 unsigned int reg;
1017 int err = 0;
1018
1019 if (!ice_dev) {
1020 pr_err("%s: Null data received\n", __func__);
1021 err = -ENODEV;
1022 goto out;
1023 }
1024
1025 if (ice_dev->is_ice_clk_available) {
1026 err = qcom_ice_init_clocks(ice_dev);
1027 if (err)
1028 goto out;
1029
1030 err = qcom_ice_bus_register(ice_dev);
1031 if (err)
1032 goto out;
1033 }
1034
1035 /*
1036 * It is possible that ICE device is not probed when host is probed
1037 * This would cause host probe to be deferred. When probe for host is
1038 * deferred, it can cause power collapse for host and that can wipe
1039 * configurations of host & ice. It is prudent to restore the config
1040 */
1041 err = qcom_ice_update_sec_cfg(ice_dev);
1042 if (err)
1043 goto out;
1044
1045 err = qcom_ice_verify_ice(ice_dev);
1046 if (err)
1047 goto out;
1048
1049 /* if ICE_DISABLE_FUSE is blown, return immediately
1050 * Currently, FORCE HW Keys are also disabled, since
1051 * there is no use case for their usage neither in FDE
1052 * nor in PFE
1053 */
1054 reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
1055 reg &= (ICE_FUSE_SETTING_MASK |
1056 ICE_FORCE_HW_KEY0_SETTING_MASK |
1057 ICE_FORCE_HW_KEY1_SETTING_MASK);
1058
1059 if (reg) {
1060 ice_dev->is_ice_disable_fuse_blown = true;
1061 pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
1062 __func__);
1063 err = -EPERM;
1064 goto out;
1065 }
1066
1067 /* TZ side of ICE driver would handle secure init of ICE HW from v2 */
1068 if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
1069 !qcom_ice_secure_ice_init(ice_dev)) {
1070 pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
1071 err = -EFAULT;
1072 goto out;
1073 }
1074
1075 qcom_ice_low_power_mode_enable(ice_dev);
1076 qcom_ice_optimization_enable(ice_dev);
1077 qcom_ice_config_proc_ignore(ice_dev);
1078 qcom_ice_enable_test_bus_config(ice_dev);
1079 qcom_ice_enable(ice_dev);
1080 ice_dev->is_ice_enabled = true;
1081 qcom_ice_enable_intr(ice_dev);
1082
1083out:
1084 return err;
1085}
1086
1087static int qcom_ice_init(struct platform_device *pdev,
1088 void *host_controller_data,
1089 ice_error_cb error_cb)
1090{
1091 /*
1092 * A completion event for host controller would be triggered upon
1093 * initialization completion
1094 * When ICE is initialized, it would put ICE into Global Bypass mode
1095 * When any request for data transfer is received, it would enable
1096 * the ICE for that particular request
1097 */
1098 struct ice_device *ice_dev;
1099
1100 ice_dev = platform_get_drvdata(pdev);
1101 if (!ice_dev) {
1102 pr_err("%s: invalid device\n", __func__);
1103 return -EINVAL;
1104 }
1105
1106 ice_dev->error_cb = error_cb;
1107 ice_dev->host_controller_data = host_controller_data;
1108
1109 return qcom_ice_finish_init(ice_dev);
1110}
1111
/*
 * qcom_ice_finish_power_collapse() - re-initialize ICE after the storage
 * controller (and therefore ICE) was power collapsed.
 *
 * Re-runs the low-power/optimization/enable sequences and restores keys:
 * via TZ restore-config on v1, via restore-key-config plus a PFE key
 * cache flush on v3+. Records the completion timestamp.
 *
 * Returns 0 on success, -EPERM if the disable fuse is blown, -EFAULT or
 * the restore error otherwise.
 */
static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
{
	int err = 0;

	if (ice_dev->is_ice_disable_fuse_blown) {
		err = -EPERM;
		goto out;
	}

	if (ice_dev->is_ice_enabled) {
		/*
		 * ICE resets into global bypass mode with optimization and
		 * low power mode disabled. Hence we need to redo those seq's.
		 */
		qcom_ice_low_power_mode_enable(ice_dev);

		qcom_ice_enable_test_bus_config(ice_dev);

		qcom_ice_optimization_enable(ice_dev);
		qcom_ice_enable(ice_dev);

		if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
			/*
			 * When ICE resets, it wipes all of keys from LUTs
			 * ICE driver should call TZ to restore keys
			 */
			if (qcom_ice_restore_config()) {
				err = -EFAULT;
				goto out;
			}

			/*
			 * ICE looses its key configuration when UFS is reset,
			 * restore it
			 */
		} else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
			err = qcom_ice_restore_key_config(ice_dev);
			if (err)
				goto out;

			/*
			 * for PFE case, clear the cached ICE key table,
			 * this will force keys to be reconfigured
			 * per each next transaction
			 */
			pfk_clear_on_reset();
		}
	}

	ice_dev->ice_reset_complete_time = ktime_get();
out:
	return err;
}
1165
1166static int qcom_ice_resume(struct platform_device *pdev)
1167{
1168 /*
1169 * ICE is power collapsed when storage controller is power collapsed
1170 * ICE resume function is responsible for:
1171 * ICE HW enabling sequence
1172 * Key restoration
1173 * A completion event should be triggered
1174 * upon resume completion
1175 * Storage driver will be fully operational only
1176 * after receiving this event
1177 */
1178 struct ice_device *ice_dev;
1179
1180 ice_dev = platform_get_drvdata(pdev);
1181
1182 if (!ice_dev)
1183 return -EINVAL;
1184
1185 if (ice_dev->is_ice_clk_available) {
1186 /*
1187 * Storage is calling this function after power collapse which
1188 * would put ICE into GLOBAL_BYPASS mode. Make sure to enable
1189 * ICE
1190 */
1191 qcom_ice_enable(ice_dev);
1192 }
1193
1194 return 0;
1195}
1196
1197static void qcom_ice_dump_test_bus(struct ice_device *ice_dev)
1198{
1199 u32 reg = 0x1;
1200 u32 val;
1201 u8 bus_selector;
1202 u8 stream_selector;
1203
1204 pr_err("ICE TEST BUS DUMP:\n");
1205
1206 for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) {
1207 reg = 0x1; /* enable test bus */
1208 reg |= bus_selector << 28;
1209 if (bus_selector == 0xD)
1210 continue;
1211 qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
1212 /*
1213 * make sure test bus selector is written before reading
1214 * the test bus register
1215 */
1216 mb();
1217 val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
1218 pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
1219 reg, val);
1220 }
1221
1222 pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
1223 for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
1224 reg = 0xD0000001; /* enable stream test bus */
1225 reg |= stream_selector << 16;
1226 qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
1227 /*
1228 * make sure test bus selector is written before reading
1229 * the test bus register
1230 */
1231 mb();
1232 val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
1233 pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
1234 reg, val);
1235 }
1236}
1237
1238static void qcom_ice_debug(struct platform_device *pdev)
1239{
1240 struct ice_device *ice_dev;
1241
1242 if (!pdev) {
1243 pr_err("%s: Invalid params passed\n", __func__);
1244 goto out;
1245 }
1246
1247 ice_dev = platform_get_drvdata(pdev);
1248
1249 if (!ice_dev) {
1250 pr_err("%s: No ICE device available\n", __func__);
1251 goto out;
1252 }
1253
1254 if (!ice_dev->is_ice_enabled) {
1255 pr_err("%s: ICE device is not enabled\n", __func__);
1256 goto out;
1257 }
1258
1259 pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
1260 ice_dev->ice_instance_type, ice_dev);
1261
1262 pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
1263 ice_dev->ice_instance_type,
1264 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL),
1265 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET));
1266
1267 pr_err("%s: ICE Version: 0x%08x | ICE FUSE: 0x%08x\n",
1268 ice_dev->ice_instance_type,
1269 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION),
1270 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING));
1271
1272 pr_err("%s: ICE Param1: 0x%08x | ICE Param2: 0x%08x\n",
1273 ice_dev->ice_instance_type,
1274 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_1),
1275 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_2));
1276
1277 pr_err("%s: ICE Param3: 0x%08x | ICE Param4: 0x%08x\n",
1278 ice_dev->ice_instance_type,
1279 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_3),
1280 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_4));
1281
1282 pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS: 0x%08x\n",
1283 ice_dev->ice_instance_type,
1284 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_5),
1285 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS));
1286
1287 pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR: 0x%08x\n",
1288 ice_dev->ice_instance_type,
1289 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK),
1290 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_CLR));
1291
1292 if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
1293 pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n",
1294 ice_dev->ice_instance_type,
1295 qcom_ice_readl(ice_dev,
1296 QCOM_ICE_INVALID_CCFG_ERR_STTS));
1297 }
1298
1299 if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
1300 ((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
1301 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
1302 pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts: 0x%08x\n",
1303 ice_dev->ice_instance_type,
1304 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS),
1305 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS));
1306 }
1307
1308 pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP: 0x%08x\n",
1309 ice_dev->ice_instance_type,
1310 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL),
1311 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP));
1312
1313 pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
1314 ice_dev->ice_instance_type,
1315 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1),
1316 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2));
1317
1318 pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
1319 ice_dev->ice_instance_type,
1320 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1),
1321 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2));
1322
1323 pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
1324 ice_dev->ice_instance_type,
1325 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS1),
1326 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS2));
1327
1328 pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
1329 ice_dev->ice_instance_type,
1330 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS3),
1331 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS4));
1332
1333 pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
1334 ice_dev->ice_instance_type,
1335 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS1),
1336 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS2));
1337
1338 pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
1339 ice_dev->ice_instance_type,
1340 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS3),
1341 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS4));
1342
1343 pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
1344 ice_dev->ice_instance_type,
1345 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB),
1346 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB));
1347
1348 pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
1349 ice_dev->ice_instance_type,
1350 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB),
1351 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB));
1352
1353 pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
1354 ice_dev->ice_instance_type,
1355 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB),
1356 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB));
1357
1358 pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
1359 ice_dev->ice_instance_type,
1360 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB),
1361 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB));
1362
1363 pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
1364 ice_dev->ice_instance_type,
1365 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB),
1366 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB));
1367
1368 pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
1369 ice_dev->ice_instance_type,
1370 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB),
1371 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB));
1372
1373 pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
1374 ice_dev->ice_instance_type,
1375 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB),
1376 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB));
1377
1378 pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
1379 ice_dev->ice_instance_type,
1380 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB),
1381 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB));
1382
1383 pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
1384 ice_dev->ice_instance_type,
1385 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB),
1386 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB));
1387
1388 pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
1389 ice_dev->ice_instance_type,
1390 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB),
1391 qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB));
1392
1393 qcom_ice_dump_test_bus(ice_dev);
1394 pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
1395 ice_dev->ice_instance_type,
1396 (unsigned long long)ice_dev->ice_reset_start_time.tv64,
1397 (unsigned long long)ice_dev->ice_reset_complete_time.tv64);
1398
1399 if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
1400 ice_dev->ice_reset_start_time)) > 0)
1401 pr_err("%s: Time taken for reset: %lu\n",
1402 ice_dev->ice_instance_type,
1403 (unsigned long)ktime_to_us(ktime_sub(
1404 ice_dev->ice_reset_complete_time,
1405 ice_dev->ice_reset_start_time)));
1406out:
1407 return;
1408}
1409
1410static int qcom_ice_reset(struct platform_device *pdev)
1411{
1412 struct ice_device *ice_dev;
1413
1414 ice_dev = platform_get_drvdata(pdev);
1415 if (!ice_dev) {
1416 pr_err("%s: INVALID ice_dev\n", __func__);
1417 return -EINVAL;
1418 }
1419
1420 ice_dev->ice_reset_start_time = ktime_get();
1421
1422 return qcom_ice_finish_power_collapse(ice_dev);
1423}
1424
1425static int qcom_ice_config_start(struct platform_device *pdev,
1426 struct request *req,
1427 struct ice_data_setting *setting, bool async)
1428{
1429 struct ice_crypto_setting *crypto_data;
1430 struct ice_crypto_setting pfk_crypto_data = {0};
1431 union map_info *info;
1432 int ret = 0;
1433 bool is_pfe = false;
1434
1435 if (!pdev || !req || !setting) {
1436 pr_err("%s: Invalid params passed\n", __func__);
1437 return -EINVAL;
1438 }
1439
1440 /*
1441 * It is not an error to have a request with no bio
1442 * Such requests must bypass ICE. So first set bypass and then
1443 * return if bio is not available in request
1444 */
1445 if (setting) {
1446 setting->encr_bypass = true;
1447 setting->decr_bypass = true;
1448 }
1449
1450 if (!req->bio) {
1451 /* It is not an error to have a request with no bio */
1452 return 0;
1453 }
1454
1455 ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
1456 if (is_pfe) {
1457 if (ret) {
1458 if (ret != -EBUSY && ret != -EAGAIN)
1459 pr_err("%s error %d while configuring ice key for PFE\n",
1460 __func__, ret);
1461 return ret;
1462 }
1463
1464 return qti_ice_setting_config(req, pdev,
1465 &pfk_crypto_data, setting);
1466 }
1467
1468 /*
1469 * info field in req->end_io_data could be used by mulitple dm or
1470 * non-dm entities. To ensure that we are running operation on dm
1471 * based request, check BIO_DONT_FREE flag
1472 */
1473 if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
1474 info = dm_get_rq_mapinfo(req);
1475 if (!info) {
1476 pr_debug("%s info not available in request\n",
1477 __func__);
1478 return 0;
1479 }
1480
1481 crypto_data = (struct ice_crypto_setting *)info->ptr;
1482 if (!crypto_data) {
1483 pr_err("%s crypto_data not available in request\n",
1484 __func__);
1485 return -EINVAL;
1486 }
1487
1488 return qti_ice_setting_config(req, pdev,
1489 crypto_data, setting);
1490 }
1491
1492 /*
1493 * It is not an error. If target is not req-crypt based, all request
1494 * from storage driver would come here to check if there is any ICE
1495 * setting required
1496 */
1497 return 0;
1498}
1499EXPORT_SYMBOL(qcom_ice_config_start);
1500
1501static int qcom_ice_config_end(struct request *req)
1502{
1503 int ret = 0;
1504 bool is_pfe = false;
1505
1506 if (!req) {
1507 pr_err("%s: Invalid params passed\n", __func__);
1508 return -EINVAL;
1509 }
1510
1511 if (!req->bio) {
1512 /* It is not an error to have a request with no bio */
1513 return 0;
1514 }
1515
1516 ret = pfk_load_key_end(req->bio, &is_pfe);
1517 if (is_pfe) {
1518 if (ret != 0)
1519 pr_err("%s error %d while end configuring ice key for PFE\n",
1520 __func__, ret);
1521 return ret;
1522 }
1523
1524
1525 return 0;
1526}
1527EXPORT_SYMBOL(qcom_ice_config_end);
1528
1529
1530static int qcom_ice_status(struct platform_device *pdev)
1531{
1532 struct ice_device *ice_dev;
1533 unsigned int test_bus_reg_status;
1534
1535 if (!pdev) {
1536 pr_err("%s: Invalid params passed\n", __func__);
1537 return -EINVAL;
1538 }
1539
1540 ice_dev = platform_get_drvdata(pdev);
1541
1542 if (!ice_dev)
1543 return -ENODEV;
1544
1545 if (!ice_dev->is_ice_enabled)
1546 return -ENODEV;
1547
1548 test_bus_reg_status = qcom_ice_readl(ice_dev,
1549 QCOM_ICE_REGS_TEST_BUS_REG);
1550
1551 return !!(test_bus_reg_status & QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR);
1552
1553}
1554
/*
 * Callback table handed to storage (UFS/SDCC) drivers via
 * qcom_ice_get_variant_ops().  Each callback takes the ICE platform
 * device associated with the storage controller.
 */
struct qcom_ice_variant_ops qcom_ice_ops = {
	.name             = "qcom",
	.init             = qcom_ice_init,
	.reset            = qcom_ice_reset,
	.resume           = qcom_ice_resume,
	.suspend          = qcom_ice_suspend,
	.config_start     = qcom_ice_config_start,
	.config_end       = qcom_ice_config_end,
	.status           = qcom_ice_status,
	.debug            = qcom_ice_debug,
};
1566
1567struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
1568{
1569 struct platform_device *ice_pdev = NULL;
1570 struct ice_device *ice_dev = NULL;
1571
1572 if (!node) {
1573 pr_err("%s: invalid node %p", __func__, node);
1574 goto out;
1575 }
1576
1577 if (!of_device_is_available(node)) {
1578 pr_err("%s: device unavailable\n", __func__);
1579 goto out;
1580 }
1581
1582 if (list_empty(&ice_devices)) {
1583 pr_err("%s: invalid device list\n", __func__);
1584 ice_pdev = ERR_PTR(-EPROBE_DEFER);
1585 goto out;
1586 }
1587
1588 list_for_each_entry(ice_dev, &ice_devices, list) {
1589 if (ice_dev->pdev->of_node == node) {
1590 pr_info("%s: found ice device %p\n", __func__, ice_dev);
1591 break;
1592 }
1593 }
1594
1595 ice_pdev = to_platform_device(ice_dev->pdev);
1596 pr_info("%s: matching platform device %p\n", __func__, ice_pdev);
1597out:
1598 return ice_pdev;
1599}
1600
1601static struct ice_device *get_ice_device_from_storage_type
1602 (const char *storage_type)
1603{
1604 struct ice_device *ice_dev = NULL;
1605
1606 if (list_empty(&ice_devices)) {
1607 pr_err("%s: invalid device list\n", __func__);
1608 ice_dev = ERR_PTR(-EPROBE_DEFER);
1609 goto out;
1610 }
1611
1612 list_for_each_entry(ice_dev, &ice_devices, list) {
1613 if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
1614 pr_info("%s: found ice device %p\n", __func__, ice_dev);
1615 break;
1616 }
1617 }
1618out:
1619 return ice_dev;
1620}
1621
1622static int enable_ice_setup(struct ice_device *ice_dev)
1623{
1624 int ret = -1, vote;
1625
1626 /* Setup Regulator */
1627 if (ice_dev->is_regulator_available) {
1628 if (qcom_ice_get_vreg(ice_dev)) {
1629 pr_err("%s: Could not get regulator\n", __func__);
1630 goto out;
1631 }
1632 ret = regulator_enable(ice_dev->reg);
1633 if (ret) {
1634 pr_err("%s:%p: Could not enable regulator\n",
1635 __func__, ice_dev);
1636 goto out;
1637 }
1638 }
1639
1640 /* Setup Clocks */
1641 if (qcom_ice_enable_clocks(ice_dev, true)) {
1642 pr_err("%s:%p:%s Could not enable clocks\n", __func__,
1643 ice_dev, ice_dev->ice_instance_type);
1644 goto out_reg;
1645 }
1646
1647 /* Setup Bus Vote */
1648 vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
1649 if (vote < 0)
1650 goto out_clocks;
1651
1652 ret = qcom_ice_set_bus_vote(ice_dev, vote);
1653 if (ret) {
1654 pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
1655 goto out_clocks;
1656 }
1657
1658 return ret;
1659
1660out_clocks:
1661 qcom_ice_enable_clocks(ice_dev, false);
1662out_reg:
1663 if (ice_dev->is_regulator_available) {
1664 if (qcom_ice_get_vreg(ice_dev)) {
1665 pr_err("%s: Could not get regulator\n", __func__);
1666 goto out;
1667 }
1668 ret = regulator_disable(ice_dev->reg);
1669 if (ret) {
1670 pr_err("%s:%pK: Could not disable regulator\n",
1671 __func__, ice_dev);
1672 goto out;
1673 }
1674 }
1675out:
1676 return ret;
1677}
1678
1679static int disable_ice_setup(struct ice_device *ice_dev)
1680{
1681 int ret = -1, vote;
1682
1683 /* Setup Bus Vote */
1684 vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
1685 if (vote < 0) {
1686 pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev);
1687 goto out_disable_clocks;
1688 }
1689
1690 ret = qcom_ice_set_bus_vote(ice_dev, vote);
1691 if (ret)
1692 pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
1693
1694out_disable_clocks:
1695
1696 /* Setup Clocks */
1697 if (qcom_ice_enable_clocks(ice_dev, false))
1698 pr_err("%s:%p:%s Could not disable clocks\n", __func__,
1699 ice_dev, ice_dev->ice_instance_type);
1700
1701 /* Setup Regulator */
1702 if (ice_dev->is_regulator_available) {
1703 if (qcom_ice_get_vreg(ice_dev)) {
1704 pr_err("%s: Could not get regulator\n", __func__);
1705 goto out;
1706 }
1707 ret = regulator_disable(ice_dev->reg);
1708 if (ret) {
1709 pr_err("%s:%p: Could not disable regulator\n",
1710 __func__, ice_dev);
1711 goto out;
1712 }
1713 }
1714out:
1715 return ret;
1716}
1717
1718int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
1719{
1720 int ret = -1;
1721 struct ice_device *ice_dev = NULL;
1722
1723 ice_dev = get_ice_device_from_storage_type(storage_type);
1724 if (ice_dev == ERR_PTR(-EPROBE_DEFER))
1725 return -EPROBE_DEFER;
1726
1727 if (!ice_dev)
1728 return ret;
1729
1730 if (enable)
1731 return enable_ice_setup(ice_dev);
1732 else
1733 return disable_ice_setup(ice_dev);
1734}
1735
/*
 * qcom_ice_get_variant_ops() - return the ICE callback table for @node.
 *
 * Only one variant ("qcom") exists, so @node is currently unused and the
 * shared ops table is always returned.
 */
struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
{
	return &qcom_ice_ops;
}
EXPORT_SYMBOL(qcom_ice_get_variant_ops);
1741
/*
 * DT match table: binds this driver to "qcom,ice" nodes and, via
 * MODULE_DEVICE_TABLE, lets userspace auto-load the module.
 */
static const struct of_device_id qcom_ice_match[] = {
	{ .compatible = "qcom,ice" },
	{},
};
MODULE_DEVICE_TABLE(of, qcom_ice_match);
1748
/* Platform driver registration; probe/remove are defined earlier in this file. */
static struct platform_driver qcom_ice_driver = {
	.probe          = qcom_ice_probe,
	.remove         = qcom_ice_remove,
	.driver         = {
		.owner  = THIS_MODULE,
		.name   = "qcom_ice",
		.of_match_table = qcom_ice_match,
	},
};
module_platform_driver(qcom_ice_driver);
1759
1760MODULE_LICENSE("GPL v2");
1761MODULE_DESCRIPTION("QTI Inline Crypto Engine driver");