blob: b938a3a6cee3007be1ff020f32c133b06c1d0d34 [file] [log] [blame]
Hariram Purushothamandc4402e2017-03-28 20:41:43 -07001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
14
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/of.h>
18#include <linux/module.h>
19#include <linux/ion.h>
20#include <linux/iommu.h>
21#include <linux/timer.h>
22#include <linux/kernel.h>
23
24#include <media/cam_req_mgr.h>
25#include "cam_soc_util.h"
26#include "cam_smmu_api.h"
27#include "cam_cdm_intf_api.h"
28#include "cam_cdm.h"
29#include "cam_cdm_core_common.h"
30#include "cam_cdm_soc.h"
31#include "cam_io_util.h"
32#include "cam_hw_cdm170_reg.h"
33
34
35#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
36#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
37#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
38#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
39
40#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
41
42static void cam_hw_cdm_work(struct work_struct *work);
43
/* DT match table entry for all CDM variants */
static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
	{
		.compatible = CAM_HW_CDM_CPAS_0_NAME,
		/* Per-variant register offset table consumed by the core */
		.data = &cam170_cpas_cdm_offset_table,
	},
	{}
};
52
53static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
54{
55 if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name))
56 return CAM_CDM_CPAS_0;
57
58 return CAM_CDM_MAX;
59}
60
61int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
62 uint32_t *pending_bl)
63{
64 int rc = 0;
65
66 if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
67 pending_bl)) {
68 pr_err("Failed to read CDM pending BL's\n");
69 rc = -1;
70 }
71
72 return rc;
73}
74
75int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
76{
77 int rc = 0;
78
79 if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
80 pr_err("Failed to Write CDM HW core debug\n");
81 rc = -1;
82 }
83
84 return rc;
85}
86
87int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
88{
89 int rc = 0;
90
91 if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
92 pr_err("Failed to Write CDM HW core debug\n");
93 rc = -1;
94 }
95
96 return rc;
97}
98
99void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
100{
101 uint32_t dump_reg = 0;
102
103 cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
104 pr_err("dump core en=%x\n", dump_reg);
105
106 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
107 pr_err("dump scratch0=%x\n", dump_reg);
108
109 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
110 pr_err("dump scratch1=%x\n", dump_reg);
111
112 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
113 pr_err("dump scratch2=%x\n", dump_reg);
114
115 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
116 pr_err("dump scratch3=%x\n", dump_reg);
117
118 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
119 pr_err("dump scratch4=%x\n", dump_reg);
120
121 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
122 pr_err("dump scratch5=%x\n", dump_reg);
123
124 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
125 pr_err("dump scratch6=%x\n", dump_reg);
126
127 cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
128 pr_err("dump scratch7=%x\n", dump_reg);
129
130}
131
/*
 * Dump the CDM core debug state to the kernel log.
 *
 * The core is paused (CORE_EN = 0x03) for the duration of the dump so the
 * read-back values are stable, and re-enabled (CORE_EN = 1) at the end.
 * Serialized against other HW access via cdm_hw->hw_mutex. The AHB and
 * BL FIFO sections are only dumped when the corresponding debug bits were
 * previously enabled (see cam_hw_cdm_enable_core_dbg).
 */
void cam_hw_cdm_dump_core_debug_registers(
	struct cam_hw_info *cdm_hw)
{
	uint32_t dump_reg, core_dbg, loop_cnt;

	mutex_lock(&cdm_hw->hw_mutex);
	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
	pr_err("CDM HW core status=%x\n", dump_reg);
	/* First pause CDM */
	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x03);
	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
	pr_err("CDM HW current pending BL=%x\n", dump_reg);
	/* Pending-BL count doubles as the loop bound for the FIFO dump */
	loop_cnt = dump_reg;
	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
	pr_err("CDM HW Debug status reg=%x\n", dump_reg);
	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
	/* Bit 8 of the core debug cfg gates AHB address/data logging */
	if (core_dbg & 0x100) {
		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
		pr_err("AHB dump reglastaddr=%x\n", dump_reg);
		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
		pr_err("AHB dump reglastdata=%x\n", dump_reg);
	} else {
		pr_err("CDM HW AHB dump not enable\n");
	}

	/* Bit 16 of the core debug cfg gates BL FIFO readback */
	if (core_dbg & 0x10000) {
		int i;

		pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
		for (i = 0 ; i < loop_cnt ; i++) {
			/* Select FIFO entry i, then read back base and len */
			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
				&dump_reg);
			pr_err("BL(%d) base addr =%x\n", i, dump_reg);
			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
				&dump_reg);
			/*
			 * NOTE(review): the tag is printed as the raw masked
			 * value (0xFF000000, no shift), while BL writes place
			 * the tag at bits 20-27 ((tag & 0xFF) << 20). The
			 * mask/shift look inconsistent — confirm against the
			 * FIFO readback register layout.
			 */
			pr_err("BL(%d) len=%d tag=%d\n", i,
				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
		}
	} else {
		pr_err("CDM HW BL FIFO readback not enable\n");
	}

	pr_err("CDM HW default dump\n");
	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
	pr_err("CDM HW core cfg=%x\n", dump_reg);

	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
	pr_err("CDM HW irq status=%x\n", dump_reg);

	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
	pr_err("CDM HW irq set reg=%x\n", dump_reg);

	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
	pr_err("CDM HW current BL base=%x\n", dump_reg);

	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
	pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
		(dump_reg & 0xFF000000));

	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
	pr_err("CDM HW current AHB base=%x\n", dump_reg);

	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
	pr_err("CDM HW current pending BL=%x\n", dump_reg);

	/* Enable CDM back */
	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 1);
	mutex_unlock(&cdm_hw->hw_mutex);

}
203
/*
 * Wait until the CDM BL FIFO has room to accept more BLs.
 * @bl_count: number of BLs the caller still intends to write.
 *
 * One FIFO slot is always held in reserve (all comparisons use
 * available - 1) — presumably for the trailing gen-irq BL; confirm.
 *
 * Returns:
 *  >= 0 : a write budget for the caller (see NOTE below)
 *  -1   : register read failure, bogus pending count, or wait timeout
 */
int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
	uint32_t bl_count)
{
	uint32_t pending_bl = 0;
	int32_t available_bl_slots = 0;
	int rc = -1;
	long time_left;
	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;

	do {
		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
			&pending_bl)) {
			pr_err("Failed to read CDM pending BL's\n");
			rc = -1;
			break;
		}
		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
		if (available_bl_slots < 0) {
			/* HW reported more pending BLs than the FIFO holds */
			pr_err("Invalid available slots %d:%d:%d\n",
				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
				pending_bl);
			break;
		}
		if (bl_count < (available_bl_slots - 1)) {
			/* Enough room for the whole remaining request */
			CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
				(available_bl_slots - 1), bl_count);
			rc = bl_count;
			break;
		} else if (0 == (available_bl_slots - 1)) {
			/*
			 * FIFO effectively full: block on the BL-done
			 * completion, then loop to re-read the pending count.
			 */
			time_left = wait_for_completion_timeout(
				&core->bl_complete, msecs_to_jiffies(
				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
			if (time_left <= 0) {
				pr_err("CDM HW BL Wait timed out failed\n");
				rc = -1;
				break;
			}
			rc = 0;
			CDM_CDBG("CDM HW is ready for data\n");
		} else {
			/*
			 * NOTE(review): this returns the shortfall
			 * (requested - available + 1) rather than the number
			 * of immediately writable slots, yet the caller uses
			 * the return value as a write budget — verify intent.
			 */
			rc = (bl_count - (available_bl_slots - 1));
			break;
		}
	} while (1);

	return rc;
}
251
252bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
253 uint32_t len, uint32_t tag)
254{
255 if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
256 pr_err("Failed to write CDM base to BL base\n");
257 return true;
258 }
259 if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
260 ((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
261 pr_err("Failed to write CDM BL len\n");
262 return true;
263 }
264 return false;
265}
266
267bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
268{
269 if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
270 pr_err("Failed to write CDM commit BL\n");
271 return true;
272 }
273 return false;
274}
275
/*
 * Queue a "gen irq" BL that makes the CDM raise an inline IRQ carrying
 * core->bl_tag once the preceding BLs have executed.
 *
 * A tracking node is appended to core->bl_request_list so the IRQ worker
 * (cam_hw_cdm_work) can look the tag up and notify the client identified
 * by req->handle / req->data->cookie. Called from cam_hw_cdm_submit_bl
 * with the client and hw mutexes held.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM/-1 on failure; on BL write or
 * commit failure the tracking node is unlinked and freed again.
 */
int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *req)
{
	struct cam_cdm_bl_cb_request_entry *node;
	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
	uint32_t len;
	int rc;

	/* Tags beyond 63 are invalid for the gen-irq scratch buffer */
	if (core->bl_tag > 63) {
		pr_err("bl_tag invalid =%d\n", core->bl_tag);
		rc = -EINVAL;
		goto end;
	}
	CDM_CDBG("CDM write BL last cmd tag=%d total=%d\n",
		core->bl_tag, req->data->cmd_arrary_count);
	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
		GFP_KERNEL);
	if (!node) {
		rc = -ENOMEM;
		goto end;
	}
	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
	node->client_hdl = req->handle;
	node->cookie = req->data->cookie;
	node->bl_tag = core->bl_tag;
	node->userdata = req->data->userdata;
	list_add_tail(&node->entry, &core->bl_request_list);
	/* Word offset of this tag's slot within the gen-irq command buffer */
	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
		core->bl_tag);
	/* 4*len converts the word offset to bytes; BL length is size-1 */
	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
		((4 * core->ops->cdm_required_size_genirq()) - 1),
		core->bl_tag);
	if (rc) {
		pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
			core->bl_tag);
		/* Roll back the tracking node queued above */
		list_del_init(&node->entry);
		kfree(node);
		rc = -1;
		goto end;
	}

	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
		pr_err("Cannot commit the genirq BL with tag tag=%d\n",
			core->bl_tag);
		/* Roll back the tracking node queued above */
		list_del_init(&node->entry);
		kfree(node);
		rc = -1;
	}

end:
	return rc;
}
329
330int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
331 struct cam_cdm_hw_intf_cmd_submit_bl *req,
332 struct cam_cdm_client *client)
333{
334 int i, rc = -1;
335 struct cam_cdm_bl_request *cdm_cmd = req->data;
336 struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
337 uint32_t pending_bl = 0;
338 int write_count = 0;
339
340 if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
341 pr_info("requested BL more than max size, cnt=%d max=%d\n",
342 req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
343 }
344
345 if (atomic_read(&core->error) != 0) {
346 pr_err("HW in error state, cannot trigger transactions now\n");
347 return rc;
348 }
349
350 mutex_lock(&cdm_hw->hw_mutex);
351 mutex_lock(&client->lock);
352 rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
353 if (rc) {
354 pr_err("Cannot read the current BL depth\n");
355 mutex_unlock(&client->lock);
356 mutex_unlock(&cdm_hw->hw_mutex);
357 return rc;
358 }
359
360 for (i = 0; i < req->data->cmd_arrary_count ; i++) {
361 uint64_t hw_vaddr_ptr = 0;
362 size_t len = 0;
363
364 if ((!cdm_cmd->cmd[i].len) &&
365 (cdm_cmd->cmd[i].len > 0x100000)) {
366 pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
367 cdm_cmd->cmd[i].len, i,
368 req->data->cmd_arrary_count);
369 rc = -1;
370 break;
371 }
372 if (atomic_read(&core->error) != 0) {
373 pr_err("HW in error state cmd_count=%d total cnt=%d\n",
374 i, req->data->cmd_arrary_count);
375 rc = -1;
376 break;
377 }
378 if (write_count == 0) {
379 write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
380 (req->data->cmd_arrary_count - i));
381 if (write_count < 0) {
382 pr_err("wait for bl fifo failed %d:%d\n",
383 i, req->data->cmd_arrary_count);
384 rc = -1;
385 break;
386 }
387 } else {
388 write_count--;
389 }
390
391 if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
392 rc = cam_mem_get_io_buf(
393 cdm_cmd->cmd[i].bl_addr.mem_handle,
394 core->iommu_hdl.non_secure, &hw_vaddr_ptr,
395 &len);
396 } else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
397 if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
398 pr_err("Hw bl hw_iova is invalid %d:%d\n",
399 i, req->data->cmd_arrary_count);
400 rc = -1;
401 break;
402 }
403 rc = 0;
404 hw_vaddr_ptr =
405 (uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
406 len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
407 } else {
408 pr_err("Only mem hdl/hw va type is supported %d\n",
409 req->data->type);
410 rc = -1;
411 break;
412 }
413
414 if ((!rc) && (hw_vaddr_ptr) && (len) &&
415 (len >= cdm_cmd->cmd[i].offset)) {
416 CDM_CDBG("Got the HW VA\n");
417 rc = cam_hw_cdm_bl_write(cdm_hw,
418 ((uint32_t)hw_vaddr_ptr +
419 cdm_cmd->cmd[i].offset),
420 (cdm_cmd->cmd[i].len - 1), core->bl_tag);
421 if (rc) {
422 pr_err("Hw bl write failed %d:%d\n",
423 i, req->data->cmd_arrary_count);
424 rc = -1;
425 break;
426 }
427 } else {
428 pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
429 cdm_cmd->cmd[i].bl_addr.mem_handle, len,
430 cdm_cmd->cmd[i].offset);
431 pr_err("Sanity check failed for %d:%d\n",
432 i, req->data->cmd_arrary_count);
433 rc = -1;
434 break;
435 }
436
437 if (!rc) {
438 CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
439 i, core->bl_tag);
440 core->bl_tag++;
441 CDM_CDBG("Now commit the BL\n");
442 if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
443 pr_err("Cannot commit the BL %d tag=%d\n",
444 i, (core->bl_tag - 1));
445 rc = -1;
446 break;
447 }
448 CDM_CDBG("BL commit success BL %d tag=%d\n", i,
449 (core->bl_tag - 1));
450 if ((req->data->flag == true) &&
451 (i == (req->data->cmd_arrary_count -
452 1))) {
453 rc = cam_hw_cdm_submit_gen_irq(
454 cdm_hw, req);
455 if (rc == 0)
456 core->bl_tag++;
457 }
458 if (!rc && ((CAM_CDM_HWFIFO_SIZE - 1) ==
459 core->bl_tag))
460 core->bl_tag = 0;
461 }
462 }
463 mutex_unlock(&client->lock);
464 mutex_unlock(&cdm_hw->hw_mutex);
465 return rc;
466
467}
468
/*
 * Workqueue handler for CDM interrupts.
 *
 * Runs in process context with the payload captured by cam_hw_cdm_irq().
 * Dispatches on the latched IRQ status bits: inline (client gen-irq)
 * completion, reset done, BL done, and the three error conditions. Frees
 * the payload when done.
 */
static void cam_hw_cdm_work(struct work_struct *work)
{
	struct cam_cdm_work_payload *payload;
	struct cam_hw_info *cdm_hw;
	struct cam_cdm *core;

	payload = container_of(work, struct cam_cdm_work_payload, work);
	if (payload) {
		cdm_hw = payload->hw;
		core = (struct cam_cdm *)cdm_hw->core_info;

		CDM_CDBG("IRQ status=%x\n", payload->irq_status);
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
			struct cam_cdm_bl_cb_request_entry *node;

			CDM_CDBG("inline IRQ data=%x\n",
				payload->irq_data);
			/* irq_data carries the bl_tag of the completed BL;
			 * look up the tracking node queued at submit time.
			 */
			mutex_lock(&cdm_hw->hw_mutex);
			node = cam_cdm_find_request_by_bl_tag(
				payload->irq_data,
				&core->bl_request_list);
			if (node) {
				if (node->request_type ==
					CAM_HW_CDM_BL_CB_CLIENT) {
					cam_cdm_notify_clients(cdm_hw,
						CAM_CDM_CB_STATUS_BL_SUCCESS,
						(void *)node);
				} else if (node->request_type ==
					CAM_HW_CDM_BL_CB_INTERNAL) {
					/* Internal requests are not expected
					 * on this path.
					 */
					pr_err("Invalid node=%pK %d\n", node,
						node->request_type);
				}
				list_del_init(&node->entry);
				kfree(node);
			} else {
				pr_err("Invalid node for inline irq\n");
			}
			mutex_unlock(&cdm_hw->hw_mutex);
		}

		/* Reset-done wakes cam_hw_cdm_init's wait */
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
			CDM_CDBG("CDM HW reset done IRQ\n");
			complete(&core->reset_complete);
		}
		/* BL-done wakes cam_hw_cdm_wait_for_bl_fifo's wait.
		 * NOTE(review): this is a normal completion but is logged at
		 * pr_err — presumably meant to be CDM_CDBG; confirm.
		 */
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
			pr_err("CDM HW BL done IRQ\n");
			complete(&core->bl_complete);
		}
		/* Error bits: mark the core errored while dumping state so
		 * concurrent submissions back off.
		 */
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
			pr_err("Invalid command IRQ, Need HW reset\n");
			atomic_inc(&core->error);
			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
			atomic_dec(&core->error);
		}
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
			pr_err("AHB IRQ\n");
			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
		}
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
			pr_err("Overflow IRQ\n");
			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
		}
		kfree(payload);
	} else {
		pr_err("NULL payload\n");
	}

}
543
544static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
545 struct device *dev, unsigned long iova, int flags, void *token)
546{
547 struct cam_hw_info *cdm_hw = NULL;
548 struct cam_cdm *core = NULL;
549
550 if (token) {
551 cdm_hw = (struct cam_hw_info *)token;
552 core = (struct cam_cdm *)cdm_hw->core_info;
553 atomic_inc(&core->error);
554 cam_hw_cdm_dump_core_debug_registers(cdm_hw);
555 pr_err("Page fault iova addr %pK\n", (void *)iova);
556 cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
557 (void *)iova);
558 atomic_dec(&core->error);
559 } else {
560 pr_err("Invalid token\n");
561 }
562
563}
564
/*
 * Hard IRQ handler for the CDM.
 *
 * Latches the IRQ status (and user data for inline IRQs) into a
 * GFP_ATOMIC payload, clears the interrupt in HW, and defers all real
 * processing to cam_hw_cdm_work on the core's workqueue. If the payload
 * allocation fails the interrupt is acknowledged to the kernel without
 * being cleared in HW.
 */
irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
{
	struct cam_hw_info *cdm_hw = data;
	struct cam_cdm *cdm_core = cdm_hw->core_info;
	struct cam_cdm_work_payload *payload;
	bool work_status;

	CDM_CDBG("Got irq\n");
	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
	if (payload) {
		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
				&payload->irq_status)) {
			pr_err("Failed to read CDM HW IRQ status\n");
		}
		/* Inline IRQs carry the completed bl_tag in USR_DATA */
		if (payload->irq_status &
			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
				&payload->irq_data)) {
				pr_err("Failed to read CDM HW IRQ data\n");
			}
		}
		CDM_CDBG("Got payload=%d\n", payload->irq_status);
		payload->hw = cdm_hw;
		INIT_WORK((struct work_struct *)&payload->work,
			cam_hw_cdm_work);
		/* Clear the latched status before queueing the work so a
		 * subsequent interrupt is not lost.
		 */
		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
			payload->irq_status))
			pr_err("Failed to Write CDM HW IRQ Clear\n");
		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
			pr_err("Failed to Write CDM HW IRQ cmd\n");
		work_status = queue_work(cdm_core->work_queue, &payload->work);
		if (work_status == false) {
			pr_err("Failed to queue work for irq=%x\n",
				payload->irq_status);
			kfree(payload);
		}
	}

	return IRQ_HANDLED;
}
605
606int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
607{
608 struct cam_hw_info *cdm_hw = hw_priv;
609 struct cam_mem_mgr_request_desc genirq_alloc_cmd;
610 struct cam_mem_mgr_memory_desc genirq_alloc_out;
611 struct cam_cdm *cdm_core = NULL;
612 int rc = -EINVAL;
613
614 if (!hw_priv)
615 return rc;
616
617 cdm_core = (struct cam_cdm *)cdm_hw->core_info;
618 genirq_alloc_cmd.align = 0;
619 genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
620 genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
Seemanta Duttaa037cd12017-07-06 15:45:29 -0700621 genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
Hariram Purushothamandc4402e2017-03-28 20:41:43 -0700622 rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
623 &genirq_alloc_out);
624 if (rc) {
625 pr_err("Failed to get genirq cmd space rc=%d\n", rc);
626 goto end;
627 }
628 cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
629 cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
630 cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
631 cdm_core->gen_irq.size = genirq_alloc_out.len;
632
633end:
634 return rc;
635}
636
637int cam_hw_cdm_release_genirq_mem(void *hw_priv)
638{
639 struct cam_hw_info *cdm_hw = hw_priv;
640 struct cam_cdm *cdm_core = NULL;
641 struct cam_mem_mgr_memory_desc genirq_release_cmd;
642 int rc = -EINVAL;
643
644 if (!hw_priv)
645 return rc;
646
647 cdm_core = (struct cam_cdm *)cdm_hw->core_info;
648 genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
649 rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
650 if (rc)
651 pr_err("Failed to put genirq cmd space for hw\n");
652
653 return rc;
654}
655
656int cam_hw_cdm_init(void *hw_priv,
657 void *init_hw_args, uint32_t arg_size)
658{
659 struct cam_hw_info *cdm_hw = hw_priv;
660 struct cam_hw_soc_info *soc_info = NULL;
661 struct cam_cdm *cdm_core = NULL;
662 int rc;
663 long time_left;
664
665 if (!hw_priv)
666 return -EINVAL;
667
668 soc_info = &cdm_hw->soc_info;
669 cdm_core = (struct cam_cdm *)cdm_hw->core_info;
670
Pavan Kumar Chilamkurthi7e7607b2017-06-22 20:02:50 -0700671 rc = cam_soc_util_enable_platform_resource(soc_info, true,
672 CAM_SVS_VOTE, true);
Hariram Purushothamandc4402e2017-03-28 20:41:43 -0700673 if (rc) {
674 pr_err("Enable platform failed\n");
675 goto end;
676 }
677
678 CDM_CDBG("Enable soc done\n");
679
680/* Before triggering the reset to HW, clear the reset complete */
681 reinit_completion(&cdm_core->reset_complete);
682 reinit_completion(&cdm_core->bl_complete);
683
684 if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
685 pr_err("Failed to Write CDM HW IRQ mask\n");
686 goto disable_return;
687 }
688 if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
689 pr_err("Failed to Write CDM HW reset\n");
690 goto disable_return;
691 }
692
693 CDM_CDBG("Waiting for CDM HW resetdone\n");
694 time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
Hariram Purushothaman81851c72017-06-15 18:26:32 -0700695 msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
696
697 /*
698 * Check for HW error and recover as a workaround
699 * Sometimes CDM HW triggers irq with invalid status for
700 * HW reset command, so ignore reset failure and proceed further
701 * as a workaround.
702 */
703 if (time_left <= 0) {
704 pr_err("CDM HW reset Wait failed time_left=%ld\n", time_left);
705 time_left = 1;
706 }
707
Hariram Purushothamandc4402e2017-03-28 20:41:43 -0700708 if (time_left <= 0) {
709 pr_err("CDM HW reset Wait failed rc=%d\n", rc);
710 goto disable_return;
711 } else {
712 CDM_CDBG("CDM Init success\n");
713 cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
714 cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
715 cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CGC_CFG, 0x7);
716 rc = 0;
717 goto end;
718 }
719
720disable_return:
721 rc = -1;
722 cam_soc_util_disable_platform_resource(soc_info, true, true);
723end:
724 return rc;
725}
726
727int cam_hw_cdm_deinit(void *hw_priv,
728 void *init_hw_args, uint32_t arg_size)
729{
730 struct cam_hw_info *cdm_hw = hw_priv;
731 struct cam_hw_soc_info *soc_info = NULL;
732 struct cam_cdm *cdm_core = NULL;
733 int rc = 0;
734
735 if (!hw_priv)
736 return -EINVAL;
737
738 soc_info = &cdm_hw->soc_info;
739 cdm_core = cdm_hw->core_info;
740 rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
741 if (rc) {
742 pr_err("disable platform failed\n");
743 } else {
744 CDM_CDBG("CDM Deinit success\n");
745 cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
746 }
747
748 return rc;
749}
750
/*
 * Platform probe for a CDM HW instance.
 *
 * Allocates the hw_intf / hw_info / core triple, reads DT properties,
 * acquires an SMMU handle and attaches it, creates the IRQ workqueue,
 * requests platform resources, registers with CPAS, briefly powers the HW
 * up to read and validate its version, then registers the instance with
 * the CDM interface layer and powers it back down.
 *
 * Error handling unwinds in reverse acquisition order through the goto
 * labels at the bottom; each label undoes everything acquired before the
 * failure point.
 */
int cam_hw_cdm_probe(struct platform_device *pdev)
{
	int rc;
	struct cam_hw_info *cdm_hw = NULL;
	struct cam_hw_intf *cdm_hw_intf = NULL;
	struct cam_cdm *cdm_core = NULL;
	struct cam_cdm_private_dt_data *soc_private = NULL;
	struct cam_cpas_register_params cpas_parms;

	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
	if (!cdm_hw_intf)
		return -ENOMEM;

	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
	if (!cdm_hw) {
		kfree(cdm_hw_intf);
		return -ENOMEM;
	}

	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
	if (!cdm_hw->core_info) {
		kfree(cdm_hw);
		kfree(cdm_hw_intf);
		return -ENOMEM;
	}

	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
	cdm_hw->soc_info.pdev = pdev;
	cdm_hw_intf->hw_type = CAM_HW_CDM;
	cdm_hw->open_count = 0;
	mutex_init(&cdm_hw->hw_mutex);
	spin_lock_init(&cdm_hw->hw_lock);
	init_completion(&cdm_hw->hw_complete);

	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
	if (rc) {
		pr_err("Failed to get dt properties\n");
		goto release_mem;
	}
	cdm_hw_intf->hw_idx = cdm_hw->soc_info.pdev->id;
	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
	soc_private = (struct cam_cdm_private_dt_data *)
		cdm_hw->soc_info.soc_private;
	/* Shared CDMs serve multiple clients; private ones a single owner */
	if (soc_private->dt_cdm_shared == true)
		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
	else
		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;

	cdm_core->bl_tag = 0;
	atomic_set(&cdm_core->error, 0);
	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
	if (cdm_core->id >= CAM_CDM_MAX) {
		pr_err("Failed to get CDM HW name for %s\n", cdm_core->name);
		goto release_private_mem;
	}
	INIT_LIST_HEAD(&cdm_core->bl_request_list);
	init_completion(&cdm_core->reset_complete);
	init_completion(&cdm_core->bl_complete);
	cdm_hw_intf->hw_priv = cdm_hw;
	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
	cdm_hw_intf->hw_ops.read = NULL;
	cdm_hw_intf->hw_ops.write = NULL;
	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
	/* Held across the power-up/version-check sequence below */
	mutex_lock(&cdm_hw->hw_mutex);

	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
		cdm_hw_intf->hw_idx);

	platform_set_drvdata(pdev, cdm_hw_intf);

	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
	if (rc < 0) {
		pr_err("cpas-cdm get iommu handle failed\n");
		goto unlock_release_mem;
	}
	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
		cam_hw_cdm_iommu_fault_handler, cdm_hw);

	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
	if (rc < 0) {
		pr_err("Attach iommu non secure handle failed\n");
		goto destroy_non_secure_hdl;
	}
	/* Secure SMMU handle is not used by this driver */
	cdm_core->iommu_hdl.secure = -1;

	/*
	 * NOTE(review): alloc_workqueue can return NULL; the result is not
	 * checked before queue_work/flush_workqueue use it — confirm.
	 */
	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
		CAM_CDM_INFLIGHT_WORKS);

	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
		cam_hw_cdm_irq, cdm_hw);
	if (rc) {
		pr_err("Failed to request platform resource\n");
		goto destroy_non_secure_hdl;
	}

	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
	cpas_parms.cell_index = cdm_hw->soc_info.index;
	cpas_parms.dev = &pdev->dev;
	cpas_parms.userdata = cdm_hw_intf;
	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
	rc = cam_cpas_register_client(&cpas_parms);
	if (rc) {
		pr_err("Virtual CDM CPAS registration failed\n");
		goto release_platform_resource;
	}
	CDM_CDBG("CPAS registration successful handle=%d\n",
		cpas_parms.client_handle);
	cdm_core->cpas_handle = cpas_parms.client_handle;

	/* Power up briefly just to read and validate the HW version */
	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
	if (rc) {
		pr_err("Failed to Init CDM HW\n");
		goto init_failed;
	}
	cdm_hw->open_count++;

	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
		&cdm_core->hw_version)) {
		pr_err("Failed to read CDM HW Version\n");
		goto deinit;
	}

	/*
	 * NOTE(review): this reads CDM_CFG_HW_VERSION again for the family
	 * version — presumably a distinct family-version register was
	 * intended; confirm against the register map.
	 */
	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
		&cdm_core->hw_family_version)) {
		pr_err("Failed to read CDM family Version\n");
		goto deinit;
	}

	CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
		cdm_core->hw_family_version, cdm_core->hw_version);
	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
		false);
	if (!cdm_core->ops) {
		pr_err("Failed to util ops for hw\n");
		goto deinit;
	}

	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
		&cdm_core->version)) {
		pr_err("Failed to set cam he version for hw\n");
		goto deinit;
	}

	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
	if (rc) {
		pr_err("Failed to Deinit CDM HW\n");
		goto release_platform_resource;
	}

	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
		soc_private, CAM_HW_CDM, &cdm_core->index);
	if (rc) {
		pr_err("HW CDM Interface registration failed\n");
		goto release_platform_resource;
	}
	cdm_hw->open_count--;
	mutex_unlock(&cdm_hw->hw_mutex);

	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);

	return rc;

	/* Error unwind: labels run top-down from the failure point */
deinit:
	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
		pr_err("Deinit failed for hw\n");
	cdm_hw->open_count--;
init_failed:
	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
		pr_err("CPAS unregister failed\n");
release_platform_resource:
	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
		pr_err("Release platform resource failed\n");

	flush_workqueue(cdm_core->work_queue);
	destroy_workqueue(cdm_core->work_queue);
destroy_non_secure_hdl:
	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
		NULL, cdm_hw);
	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
		pr_err("Release iommu secure hdl failed\n");
unlock_release_mem:
	mutex_unlock(&cdm_hw->hw_mutex);
release_private_mem:
	kfree(cdm_hw->soc_info.soc_private);
release_mem:
	mutex_destroy(&cdm_hw->hw_mutex);
	kfree(cdm_hw_intf);
	kfree(cdm_hw->core_info);
	kfree(cdm_hw);
	return rc;
}
947
/*
 * Platform remove for a CDM HW instance.
 *
 * Refuses removal while clients still hold the HW open, then tears down in
 * roughly reverse-probe order: HW deinit, CPAS unregister, platform
 * resources, workqueue, SMMU handle, and finally the allocated structures.
 *
 * Returns 0 on success, -EBUSY (or the underlying error) on failure.
 */
int cam_hw_cdm_remove(struct platform_device *pdev)
{
	int rc = -EBUSY;
	struct cam_hw_info *cdm_hw = NULL;
	struct cam_hw_intf *cdm_hw_intf = NULL;
	struct cam_cdm *cdm_core = NULL;

	cdm_hw_intf = platform_get_drvdata(pdev);
	if (!cdm_hw_intf) {
		pr_err("Failed to get dev private data\n");
		return rc;
	}

	cdm_hw = cdm_hw_intf->hw_priv;
	if (!cdm_hw) {
		pr_err("Failed to get hw private data for type=%d idx=%d\n",
			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
		return rc;
	}

	cdm_core = cdm_hw->core_info;
	if (!cdm_core) {
		pr_err("Failed to get hw core data for type=%d idx=%d\n",
			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
		return rc;
	}

	/* Open handles still exist: refuse to remove */
	if (cdm_hw->open_count != 0) {
		pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
			cdm_hw->open_count);
		return rc;
	}

	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
	if (rc) {
		pr_err("Deinit failed for hw\n");
		return rc;
	}

	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
	if (rc) {
		pr_err("CPAS unregister failed\n");
		return rc;
	}

	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
		pr_err("Release platform resource failed\n");

	flush_workqueue(cdm_core->work_queue);
	destroy_workqueue(cdm_core->work_queue);

	/*
	 * NOTE(review): the SMMU handle is destroyed BEFORE the page-fault
	 * handler is unregistered on it — the probe error path does these in
	 * the opposite order; confirm which ordering the SMMU API expects.
	 */
	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
		pr_err("Release iommu secure hdl failed\n");
	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
		NULL, cdm_hw);

	mutex_destroy(&cdm_hw->hw_mutex);
	kfree(cdm_hw->soc_info.soc_private);
	kfree(cdm_hw_intf);
	kfree(cdm_hw->core_info);
	kfree(cdm_hw);

	return 0;
}
1013
/* Platform driver binding CDM DT nodes to the probe/remove handlers above */
static struct platform_driver cam_hw_cdm_driver = {
	.probe = cam_hw_cdm_probe,
	.remove = cam_hw_cdm_remove,
	.driver = {
		.name = "msm_cam_cdm",
		.owner = THIS_MODULE,
		.of_match_table = msm_cam_hw_cdm_dt_match,
	},
};
1023
/* Register the CDM platform driver on module load */
static int __init cam_hw_cdm_init_module(void)
{
	return platform_driver_register(&cam_hw_cdm_driver);
}
1028
/* Unregister the CDM platform driver on module unload */
static void __exit cam_hw_cdm_exit_module(void)
{
	platform_driver_unregister(&cam_hw_cdm_driver);
}
1033
1034module_init(cam_hw_cdm_init_module);
1035module_exit(cam_hw_cdm_exit_module);
1036MODULE_DESCRIPTION("MSM Camera HW CDM driver");
1037MODULE_LICENSE("GPL v2");