blob: 4bee732b7e6048ef7893828b25ca63f2f337be8b [file] [log] [blame]
Harsh Shaha1af8822017-05-11 22:06:36 -07001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
Harsh Shaha1af8822017-05-11 22:06:36 -070013#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/list.h>
16#include "cam_io_util.h"
17#include "cam_irq_controller.h"
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -070018#include "cam_debug_util.h"
Harsh Shaha1af8822017-05-11 22:06:36 -070019
/**
 * struct cam_irq_evt_handler:
 * @Brief:                Event handler information
 *
 * @priority:             Priority level of this event
 * @evt_bit_mask_arr:     evt_bit_mask that has the bits set for IRQs to
 *                        subscribe for
 * @handler_priv:         Private data that will be passed to the Top/Bottom
 *                        Half handler function
 * @top_half_handler:     Top half Handler callback function
 * @bottom_half_handler:  Bottom half Handler callback function
 * @bottom_half:          Pointer to bottom_half implementation on which to
 *                        enqueue the event for further handling
 * @bottom_half_enqueue_func:
 *                        Function used to enqueue the bottom_half event
 * @list_node:            list_head struct used for overall handler List
 * @th_list_node:         list_head struct used for top half handler List
 * @index:                Unique handle assigned on subscribe; returned to
 *                        the caller and used to enable/disable/unsubscribe
 */
struct cam_irq_evt_handler {
	enum cam_irq_priority_level        priority;
	uint32_t                          *evt_bit_mask_arr;
	void                              *handler_priv;
	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler;
	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
	void                              *bottom_half;
	CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC   bottom_half_enqueue_func;
	struct list_head                   list_node;
	struct list_head                   th_list_node;
	int                                index;
};
50
/**
 * struct cam_irq_register_obj:
 * @Brief:                Structure containing information related to
 *                        a particular register Set
 *
 * @index:                Index of set in Array
 * @mask_reg_offset:      Offset of IRQ MASK register
 * @clear_reg_offset:     Offset of IRQ CLEAR register
 * @status_reg_offset:    Offset of IRQ STATUS register
 * @top_half_enable_mask: Array of enabled bit_mask sorted by priority;
 *                        each entry is the OR of the evt_bit_mask of all
 *                        handlers currently subscribed at that priority
 */
struct cam_irq_register_obj {
	uint32_t                     index;
	uint32_t                     mask_reg_offset;
	uint32_t                     clear_reg_offset;
	uint32_t                     status_reg_offset;
	uint32_t                     top_half_enable_mask[CAM_IRQ_PRIORITY_MAX];
};
69
/**
 * struct cam_irq_controller:
 *
 * @brief:                IRQ Controller structure.
 *
 * @name:                 Name of IRQ Controller block
 * @mem_base:             Mapped base address of register space to which
 *                        register offsets are added to access registers
 * @num_registers:        Number of sets(mask/clear/status) of IRQ registers
 * @irq_register_arr:     Array of Register object associated with this
 *                        Controller
 * @irq_status_arr:       Array of IRQ Status values
 * @global_clear_offset:  Offset of Global IRQ Clear register. This register
 *                        contains the BIT that needs to be set for the CLEAR
 *                        to take effect
 * @global_clear_bitmask: Bitmask needed to be used in Global Clear register
 *                        for Clear IRQ cmd to take effect
 * @evt_handler_list_head: List of all event handlers
 * @th_list_head:         List of handlers sorted by priority
 * @hdl_idx:              Unique identity of handler assigned on Subscribe.
 *                        Used to Unsubscribe.
 * @lock:                 Lock for use by controller
 * @th_payload:           Single scratch payload reused for every top-half
 *                        invocation (filled under @lock in the IRQ path)
 */
struct cam_irq_controller {
	const char                     *name;
	void __iomem                   *mem_base;
	uint32_t                        num_registers;
	struct cam_irq_register_obj    *irq_register_arr;
	uint32_t                       *irq_status_arr;
	uint32_t                        global_clear_offset;
	uint32_t                        global_clear_bitmask;
	struct list_head                evt_handler_list_head;
	struct list_head                th_list_head[CAM_IRQ_PRIORITY_MAX];
	uint32_t                        hdl_idx;
	spinlock_t                      lock;
	struct cam_irq_th_payload       th_payload;
};
107
108int cam_irq_controller_deinit(void **irq_controller)
109{
110 struct cam_irq_controller *controller = *irq_controller;
111 struct cam_irq_evt_handler *evt_handler = NULL;
112
113 while (!list_empty(&controller->evt_handler_list_head)) {
114 evt_handler = list_first_entry(
115 &controller->evt_handler_list_head,
116 struct cam_irq_evt_handler, list_node);
117 list_del_init(&evt_handler->list_node);
Harsh Shah23557ae2017-05-13 18:14:34 -0700118 kfree(evt_handler->evt_bit_mask_arr);
Harsh Shaha1af8822017-05-11 22:06:36 -0700119 kfree(evt_handler);
120 }
121
Harsh Shah23557ae2017-05-13 18:14:34 -0700122 kfree(controller->th_payload.evt_status_arr);
Harsh Shaha1af8822017-05-11 22:06:36 -0700123 kfree(controller->irq_status_arr);
Harsh Shah23557ae2017-05-13 18:14:34 -0700124 kfree(controller->irq_register_arr);
Harsh Shaha1af8822017-05-11 22:06:36 -0700125 kfree(controller);
126 *irq_controller = NULL;
127 return 0;
128}
129
130int cam_irq_controller_init(const char *name,
131 void __iomem *mem_base,
132 struct cam_irq_controller_reg_info *register_info,
133 void **irq_controller)
134{
135 struct cam_irq_controller *controller = NULL;
136 int i, rc = 0;
137
138 *irq_controller = NULL;
139
140 if (!register_info->num_registers || !register_info->irq_reg_set ||
141 !name || !mem_base) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700142 CAM_ERR(CAM_ISP, "Invalid parameters");
Harsh Shaha1af8822017-05-11 22:06:36 -0700143 rc = -EINVAL;
144 return rc;
145 }
146
147 controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
148 if (!controller) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700149 CAM_DBG(CAM_ISP, "Failed to allocate IRQ Controller");
Harsh Shaha1af8822017-05-11 22:06:36 -0700150 return -ENOMEM;
151 }
152
153 controller->irq_register_arr = kzalloc(register_info->num_registers *
154 sizeof(struct cam_irq_register_obj), GFP_KERNEL);
155 if (!controller->irq_register_arr) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700156 CAM_DBG(CAM_ISP, "Failed to allocate IRQ register Arr");
Harsh Shaha1af8822017-05-11 22:06:36 -0700157 rc = -ENOMEM;
158 goto reg_alloc_error;
159 }
160
161 controller->irq_status_arr = kzalloc(register_info->num_registers *
162 sizeof(uint32_t), GFP_KERNEL);
163 if (!controller->irq_status_arr) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700164 CAM_DBG(CAM_ISP, "Failed to allocate IRQ status Arr");
Harsh Shaha1af8822017-05-11 22:06:36 -0700165 rc = -ENOMEM;
166 goto status_alloc_error;
167 }
Harsh Shah23557ae2017-05-13 18:14:34 -0700168
169 controller->th_payload.evt_status_arr =
170 kzalloc(register_info->num_registers * sizeof(uint32_t),
171 GFP_KERNEL);
172 if (!controller->th_payload.evt_status_arr) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700173 CAM_DBG(CAM_ISP, "Failed to allocate BH payload bit mask Arr");
Harsh Shah23557ae2017-05-13 18:14:34 -0700174 rc = -ENOMEM;
175 goto evt_mask_alloc_error;
176 }
177
Harsh Shaha1af8822017-05-11 22:06:36 -0700178 controller->name = name;
179
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700180 CAM_DBG(CAM_ISP, "num_registers: %d", register_info->num_registers);
Harsh Shaha1af8822017-05-11 22:06:36 -0700181 for (i = 0; i < register_info->num_registers; i++) {
182 controller->irq_register_arr[i].index = i;
183 controller->irq_register_arr[i].mask_reg_offset =
184 register_info->irq_reg_set[i].mask_reg_offset;
185 controller->irq_register_arr[i].clear_reg_offset =
186 register_info->irq_reg_set[i].clear_reg_offset;
187 controller->irq_register_arr[i].status_reg_offset =
188 register_info->irq_reg_set[i].status_reg_offset;
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700189 CAM_DBG(CAM_ISP, "i %d mask_reg_offset: 0x%x", i,
Harsh Shaha1af8822017-05-11 22:06:36 -0700190 controller->irq_register_arr[i].mask_reg_offset);
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700191 CAM_DBG(CAM_ISP, "i %d clear_reg_offset: 0x%x", i,
Harsh Shaha1af8822017-05-11 22:06:36 -0700192 controller->irq_register_arr[i].clear_reg_offset);
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700193 CAM_DBG(CAM_ISP, "i %d status_reg_offset: 0x%x", i,
Harsh Shaha1af8822017-05-11 22:06:36 -0700194 controller->irq_register_arr[i].status_reg_offset);
195 }
196 controller->num_registers = register_info->num_registers;
197 controller->global_clear_bitmask = register_info->global_clear_bitmask;
198 controller->global_clear_offset = register_info->global_clear_offset;
199 controller->mem_base = mem_base;
200
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700201 CAM_DBG(CAM_ISP, "global_clear_bitmask: 0x%x",
Harsh Shaha1af8822017-05-11 22:06:36 -0700202 controller->global_clear_bitmask);
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700203 CAM_DBG(CAM_ISP, "global_clear_offset: 0x%x",
Harsh Shaha1af8822017-05-11 22:06:36 -0700204 controller->global_clear_offset);
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700205 CAM_DBG(CAM_ISP, "mem_base: %pK", (void __iomem *)controller->mem_base);
Harsh Shaha1af8822017-05-11 22:06:36 -0700206
207 INIT_LIST_HEAD(&controller->evt_handler_list_head);
208 for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
209 INIT_LIST_HEAD(&controller->th_list_head[i]);
210
Jordan Croused6b16be2017-11-30 08:52:27 -0700211 spin_lock_init(&controller->lock);
Harsh Shaha1af8822017-05-11 22:06:36 -0700212
213 controller->hdl_idx = 1;
214 *irq_controller = controller;
215
216 return rc;
217
Harsh Shah23557ae2017-05-13 18:14:34 -0700218evt_mask_alloc_error:
219 kfree(controller->irq_status_arr);
Harsh Shaha1af8822017-05-11 22:06:36 -0700220status_alloc_error:
221 kfree(controller->irq_register_arr);
222reg_alloc_error:
223 kfree(controller);
224
225 return rc;
226}
227
228int cam_irq_controller_subscribe_irq(void *irq_controller,
229 enum cam_irq_priority_level priority,
230 uint32_t *evt_bit_mask_arr,
231 void *handler_priv,
232 CAM_IRQ_HANDLER_TOP_HALF top_half_handler,
233 CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler,
234 void *bottom_half,
235 CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC bottom_half_enqueue_func)
236{
237 struct cam_irq_controller *controller = irq_controller;
238 struct cam_irq_evt_handler *evt_handler = NULL;
239 int i;
240 int rc = 0;
241 uint32_t irq_mask;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700242 unsigned long flags = 0;
243 bool need_lock;
Harsh Shaha1af8822017-05-11 22:06:36 -0700244
245 if (!controller || !handler_priv || !evt_bit_mask_arr) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700246 CAM_ERR(CAM_ISP,
247 "Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK",
Harsh Shaha1af8822017-05-11 22:06:36 -0700248 controller, handler_priv, evt_bit_mask_arr);
249 return -EINVAL;
250 }
251
252 if (!top_half_handler) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700253 CAM_ERR(CAM_ISP, "Missing top half handler");
Harsh Shaha1af8822017-05-11 22:06:36 -0700254 return -EINVAL;
255 }
256
257 if (bottom_half_handler &&
258 (!bottom_half || !bottom_half_enqueue_func)) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700259 CAM_ERR(CAM_ISP,
260 "Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK",
Harsh Shaha1af8822017-05-11 22:06:36 -0700261 bottom_half_handler,
262 bottom_half,
263 bottom_half_enqueue_func);
264 return -EINVAL;
265 }
266
267 if (priority >= CAM_IRQ_PRIORITY_MAX) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700268 CAM_ERR(CAM_ISP, "Invalid priority=%u, max=%u", priority,
Harsh Shaha1af8822017-05-11 22:06:36 -0700269 CAM_IRQ_PRIORITY_MAX);
270 return -EINVAL;
271 }
272
Harsh Shaha1af8822017-05-11 22:06:36 -0700273 evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
274 if (!evt_handler) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700275 CAM_DBG(CAM_ISP, "Error allocating hlist_node");
Harsh Shaha1af8822017-05-11 22:06:36 -0700276 return -ENOMEM;
277 }
278
279 evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
280 controller->num_registers, GFP_KERNEL);
281 if (!evt_handler->evt_bit_mask_arr) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700282 CAM_DBG(CAM_ISP, "Error allocating hlist_node");
Harsh Shaha1af8822017-05-11 22:06:36 -0700283 rc = -ENOMEM;
284 goto free_evt_handler;
285 }
286
287 INIT_LIST_HEAD(&evt_handler->list_node);
288 INIT_LIST_HEAD(&evt_handler->th_list_node);
289
290 for (i = 0; i < controller->num_registers; i++)
291 evt_handler->evt_bit_mask_arr[i] = evt_bit_mask_arr[i];
292
293 evt_handler->priority = priority;
294 evt_handler->handler_priv = handler_priv;
295 evt_handler->top_half_handler = top_half_handler;
296 evt_handler->bottom_half_handler = bottom_half_handler;
297 evt_handler->bottom_half = bottom_half;
298 evt_handler->bottom_half_enqueue_func = bottom_half_enqueue_func;
299 evt_handler->index = controller->hdl_idx++;
Harsh Shah19f55812017-06-26 18:58:49 -0700300
301 /* Avoid rollover to negative values */
Harsh Shaha1af8822017-05-11 22:06:36 -0700302 if (controller->hdl_idx > 0x3FFFFFFF)
303 controller->hdl_idx = 1;
304
Harsh Shahd8e3a412017-10-27 00:32:04 -0700305 need_lock = !in_irq();
306 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700307 spin_lock_irqsave(&controller->lock, flags);
Harsh Shaha1af8822017-05-11 22:06:36 -0700308 for (i = 0; i < controller->num_registers; i++) {
309 controller->irq_register_arr[i].top_half_enable_mask[priority]
310 |= evt_bit_mask_arr[i];
311
312 irq_mask = cam_io_r_mb(controller->mem_base +
313 controller->irq_register_arr[i].mask_reg_offset);
314 irq_mask |= evt_bit_mask_arr[i];
315
316 cam_io_w_mb(irq_mask, controller->mem_base +
317 controller->irq_register_arr[i].mask_reg_offset);
318 }
Harsh Shahd8e3a412017-10-27 00:32:04 -0700319 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700320 spin_unlock_irqrestore(&controller->lock, flags);
Harsh Shaha1af8822017-05-11 22:06:36 -0700321
322 list_add_tail(&evt_handler->list_node,
323 &controller->evt_handler_list_head);
324 list_add_tail(&evt_handler->th_list_node,
325 &controller->th_list_head[priority]);
326
327 return evt_handler->index;
328
329free_evt_handler:
330 kfree(evt_handler);
331 evt_handler = NULL;
332
333 return rc;
334}
335
Alok Pandey3a980c92017-09-22 14:17:42 +0530336int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
337{
338 struct cam_irq_controller *controller = irq_controller;
339 struct cam_irq_evt_handler *evt_handler = NULL;
340 struct cam_irq_evt_handler *evt_handler_temp;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700341 unsigned long flags = 0;
Alok Pandey3a980c92017-09-22 14:17:42 +0530342 unsigned int i;
343 uint32_t irq_mask;
344 uint32_t found = 0;
345 int rc = -EINVAL;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700346 bool need_lock;
Alok Pandey3a980c92017-09-22 14:17:42 +0530347
348 if (!controller)
349 return rc;
350
351 list_for_each_entry_safe(evt_handler, evt_handler_temp,
352 &controller->evt_handler_list_head, list_node) {
353 if (evt_handler->index == handle) {
354 CAM_DBG(CAM_ISP, "enable item %d", handle);
355 found = 1;
356 rc = 0;
357 break;
358 }
359 }
360
361 if (!found)
362 return rc;
363
Harsh Shahd8e3a412017-10-27 00:32:04 -0700364 need_lock = !in_irq();
365 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700366 spin_lock_irqsave(&controller->lock, flags);
Alok Pandey3a980c92017-09-22 14:17:42 +0530367 for (i = 0; i < controller->num_registers; i++) {
368 controller->irq_register_arr[i].
369 top_half_enable_mask[evt_handler->priority] |=
370 evt_handler->evt_bit_mask_arr[i];
371
372 irq_mask = cam_io_r_mb(controller->mem_base +
373 controller->irq_register_arr[i].
374 mask_reg_offset);
375 irq_mask |= evt_handler->evt_bit_mask_arr[i];
376
377 cam_io_w_mb(irq_mask, controller->mem_base +
378 controller->irq_register_arr[i].mask_reg_offset);
379 }
Harsh Shahd8e3a412017-10-27 00:32:04 -0700380 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700381 spin_unlock_irqrestore(&controller->lock, flags);
Alok Pandey3a980c92017-09-22 14:17:42 +0530382
383 return rc;
384}
385
386int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
387{
388 struct cam_irq_controller *controller = irq_controller;
389 struct cam_irq_evt_handler *evt_handler = NULL;
390 struct cam_irq_evt_handler *evt_handler_temp;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700391 unsigned long flags = 0;
Alok Pandey3a980c92017-09-22 14:17:42 +0530392 unsigned int i;
393 uint32_t irq_mask;
394 uint32_t found = 0;
395 int rc = -EINVAL;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700396 bool need_lock;
Alok Pandey3a980c92017-09-22 14:17:42 +0530397
398 if (!controller)
399 return rc;
400
401 list_for_each_entry_safe(evt_handler, evt_handler_temp,
402 &controller->evt_handler_list_head, list_node) {
403 if (evt_handler->index == handle) {
404 CAM_DBG(CAM_ISP, "disable item %d", handle);
405 found = 1;
406 rc = 0;
407 break;
408 }
409 }
410
411 if (!found)
412 return rc;
413
Harsh Shahd8e3a412017-10-27 00:32:04 -0700414 need_lock = !in_irq();
415 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700416 spin_lock_irqsave(&controller->lock, flags);
Alok Pandey3a980c92017-09-22 14:17:42 +0530417 for (i = 0; i < controller->num_registers; i++) {
418 controller->irq_register_arr[i].
419 top_half_enable_mask[evt_handler->priority] &=
420 ~(evt_handler->evt_bit_mask_arr[i]);
421
422 irq_mask = cam_io_r_mb(controller->mem_base +
423 controller->irq_register_arr[i].
424 mask_reg_offset);
425 irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
426
427 cam_io_w_mb(irq_mask, controller->mem_base +
428 controller->irq_register_arr[i].
429 mask_reg_offset);
430
431 /* Clear the IRQ bits of this handler */
432 cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
433 controller->mem_base +
434 controller->irq_register_arr[i].
435 clear_reg_offset);
436
437 if (controller->global_clear_offset)
438 cam_io_w_mb(
439 controller->global_clear_bitmask,
440 controller->mem_base +
441 controller->global_clear_offset);
442 }
Harsh Shahd8e3a412017-10-27 00:32:04 -0700443 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700444 spin_unlock_irqrestore(&controller->lock, flags);
Alok Pandey3a980c92017-09-22 14:17:42 +0530445
446 return rc;
447}
448
Harsh Shaha1af8822017-05-11 22:06:36 -0700449int cam_irq_controller_unsubscribe_irq(void *irq_controller,
450 uint32_t handle)
451{
452 struct cam_irq_controller *controller = irq_controller;
453 struct cam_irq_evt_handler *evt_handler = NULL;
454 struct cam_irq_evt_handler *evt_handler_temp;
455 uint32_t i;
456 uint32_t found = 0;
457 uint32_t irq_mask;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700458 unsigned long flags = 0;
Harsh Shaha1af8822017-05-11 22:06:36 -0700459 int rc = -EINVAL;
Harsh Shahd8e3a412017-10-27 00:32:04 -0700460 bool need_lock;
Harsh Shaha1af8822017-05-11 22:06:36 -0700461
462 list_for_each_entry_safe(evt_handler, evt_handler_temp,
463 &controller->evt_handler_list_head, list_node) {
464 if (evt_handler->index == handle) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700465 CAM_DBG(CAM_ISP, "unsubscribe item %d", handle);
Harsh Shaha1af8822017-05-11 22:06:36 -0700466 list_del_init(&evt_handler->list_node);
467 list_del_init(&evt_handler->th_list_node);
468 found = 1;
469 rc = 0;
470 break;
471 }
472 }
473
Harsh Shahd8e3a412017-10-27 00:32:04 -0700474 need_lock = !in_irq();
475
Harsh Shaha1af8822017-05-11 22:06:36 -0700476 if (found) {
Harsh Shahd8e3a412017-10-27 00:32:04 -0700477 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700478 spin_lock_irqsave(&controller->lock, flags);
Harsh Shaha1af8822017-05-11 22:06:36 -0700479 for (i = 0; i < controller->num_registers; i++) {
480 controller->irq_register_arr[i].
481 top_half_enable_mask[evt_handler->priority] &=
482 ~(evt_handler->evt_bit_mask_arr[i]);
483
484 irq_mask = cam_io_r_mb(controller->mem_base +
485 controller->irq_register_arr[i].
486 mask_reg_offset);
487 irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
488
489 cam_io_w_mb(irq_mask, controller->mem_base +
490 controller->irq_register_arr[i].
491 mask_reg_offset);
492
493 /* Clear the IRQ bits of this handler */
494 cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
495 controller->mem_base +
496 controller->irq_register_arr[i].
497 clear_reg_offset);
498 if (controller->global_clear_offset)
499 cam_io_w_mb(
500 controller->global_clear_bitmask,
501 controller->mem_base +
502 controller->global_clear_offset);
503 }
Harsh Shahd8e3a412017-10-27 00:32:04 -0700504 if (need_lock)
Jordan Croused6b16be2017-11-30 08:52:27 -0700505 spin_unlock_irqrestore(&controller->lock, flags);
Harsh Shaha1af8822017-05-11 22:06:36 -0700506
507 kfree(evt_handler->evt_bit_mask_arr);
508 kfree(evt_handler);
509 }
510
511 return rc;
512}
513
514/**
515 * cam_irq_controller_match_bit_mask()
516 *
517 * @Brief: This function checks if any of the enabled IRQ bits
518 * for a certain handler is Set in the Status values
519 * of the controller.
520 *
521 * @controller: IRQ Controller structure
522 * @evt_handler: Event handler structure
523 *
524 * @Return: True: If any interested IRQ Bit is Set
525 * False: Otherwise
526 */
527static bool cam_irq_controller_match_bit_mask(
528 struct cam_irq_controller *controller,
529 struct cam_irq_evt_handler *evt_handler)
530{
531 int i;
532
533 for (i = 0; i < controller->num_registers; i++) {
534 if (evt_handler->evt_bit_mask_arr[i] &
535 controller->irq_status_arr[i])
536 return true;
537 }
538
539 return false;
540}
541
/**
 * cam_irq_controller_th_processing()
 *
 * @brief:        Run the top-half pass for one priority list: for every
 *                handler whose subscribed bits are set in the latched
 *                status, fill the shared th_payload, invoke its top half,
 *                and on success enqueue its bottom half.
 *
 * @controller:   IRQ Controller structure (irq_status_arr must already be
 *                populated by the IRQ handler)
 * @th_list_head: Handler list for a single priority level
 *
 * NOTE(review): controller->th_payload is a single shared scratch object,
 * re-initialized per matching handler — callers must serialize invocations
 * (the IRQ handler calls this under controller->lock).
 */
static void cam_irq_controller_th_processing(
	struct cam_irq_controller      *controller,
	struct list_head               *th_list_head)
{
	struct cam_irq_evt_handler     *evt_handler = NULL;
	struct cam_irq_th_payload      *th_payload = &controller->th_payload;
	bool                            is_irq_match;
	int                             rc = -EINVAL;
	int                             i;

	CAM_DBG(CAM_ISP, "Enter");

	if (list_empty(th_list_head))
		return;

	list_for_each_entry(evt_handler, th_list_head, th_list_node) {
		/* Skip handlers with no pending subscribed bits */
		is_irq_match = cam_irq_controller_match_bit_mask(controller,
			evt_handler);

		if (!is_irq_match)
			continue;

		CAM_DBG(CAM_ISP, "match found");

		/* Hand each handler only the bits it subscribed to */
		cam_irq_th_payload_init(th_payload);
		th_payload->handler_priv  = evt_handler->handler_priv;
		th_payload->num_registers = controller->num_registers;
		for (i = 0; i < controller->num_registers; i++) {
			th_payload->evt_status_arr[i] =
				controller->irq_status_arr[i] &
				evt_handler->evt_bit_mask_arr[i];
		}

		/*
		 * irq_status_arr[0] is dummy argument passed. the entire
		 * status array is passed in th_payload.
		 */
		if (evt_handler->top_half_handler)
			rc = evt_handler->top_half_handler(
				controller->irq_status_arr[0],
				(void *)th_payload);

		/*
		 * Bottom half is queued only if the top half succeeded;
		 * the top half may stash data via evt_payload_priv.
		 */
		if (!rc && evt_handler->bottom_half_handler) {
			CAM_DBG(CAM_ISP, "Enqueuing bottom half for %s",
				controller->name);
			if (evt_handler->bottom_half_enqueue_func) {
				evt_handler->bottom_half_enqueue_func(
					evt_handler->bottom_half,
					evt_handler->handler_priv,
					th_payload->evt_payload_priv,
					evt_handler->bottom_half_handler);
			}
		}
	}

	CAM_DBG(CAM_ISP, "Exit");
}
599
600irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
601{
Harsh Shah23557ae2017-05-13 18:14:34 -0700602 struct cam_irq_controller *controller = priv;
603 bool need_th_processing[CAM_IRQ_PRIORITY_MAX] = {false};
604 int i;
605 int j;
Harsh Shaha1af8822017-05-11 22:06:36 -0700606
607 if (!controller)
608 return IRQ_NONE;
609
Jordan Croused6b16be2017-11-30 08:52:27 -0700610 CAM_DBG(CAM_ISP, "locking controller %pK name %s lock %pK",
611 controller, controller->name, &controller->lock);
612 spin_lock(&controller->lock);
Harsh Shaha1af8822017-05-11 22:06:36 -0700613 for (i = 0; i < controller->num_registers; i++) {
614 controller->irq_status_arr[i] = cam_io_r_mb(
615 controller->mem_base +
616 controller->irq_register_arr[i].status_reg_offset);
617 cam_io_w_mb(controller->irq_status_arr[i],
618 controller->mem_base +
619 controller->irq_register_arr[i].clear_reg_offset);
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700620 CAM_DBG(CAM_ISP, "Read irq status%d (0x%x) = 0x%x", i,
Harsh Shah19f55812017-06-26 18:58:49 -0700621 controller->irq_register_arr[i].status_reg_offset,
Harsh Shaha1af8822017-05-11 22:06:36 -0700622 controller->irq_status_arr[i]);
623 for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
624 if (controller->irq_register_arr[i].
625 top_half_enable_mask[j] &
626 controller->irq_status_arr[i])
627 need_th_processing[j] = true;
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700628 CAM_DBG(CAM_ISP,
629 "i %d j %d need_th_processing = %d",
Harsh Shaha1af8822017-05-11 22:06:36 -0700630 i, j, need_th_processing[j]);
631 }
632 }
Jordan Croused6b16be2017-11-30 08:52:27 -0700633 CAM_DBG(CAM_ISP, "unlocked controller %pK name %s lock %pK",
634 controller, controller->name, &controller->lock);
Harsh Shaha1af8822017-05-11 22:06:36 -0700635
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700636 CAM_DBG(CAM_ISP, "Status Registers read Successful");
Harsh Shaha1af8822017-05-11 22:06:36 -0700637
638 if (controller->global_clear_offset)
639 cam_io_w_mb(controller->global_clear_bitmask,
640 controller->mem_base + controller->global_clear_offset);
641
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700642 CAM_DBG(CAM_ISP, "Status Clear done");
Harsh Shaha1af8822017-05-11 22:06:36 -0700643
644 for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
645 if (need_th_processing[i]) {
Jigarkumar Zala7c4fd372017-07-24 18:43:04 -0700646 CAM_DBG(CAM_ISP, "Invoke TH processing");
Harsh Shaha1af8822017-05-11 22:06:36 -0700647 cam_irq_controller_th_processing(controller,
648 &controller->th_list_head[i]);
649 }
650 }
Jordan Croused6b16be2017-11-30 08:52:27 -0700651 spin_unlock(&controller->lock);
Harsh Shaha1af8822017-05-11 22:06:36 -0700652
653 return IRQ_HANDLED;
654}