Sagar Gore8d91a622017-02-23 14:57:18 -08001/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/of_platform.h>
15#include <linux/slab.h>
16#include <linux/mutex.h>
17#include "cam_req_mgr_interface.h"
18#include "cam_req_mgr_util.h"
19#include "cam_req_mgr_core.h"
20#include "cam_req_mgr_workq.h"
Sagar Gored79f95e2017-03-14 18:32:17 -070021#include "cam_req_mgr_debug.h"
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -060022#include "cam_trace.h"
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070023#include "cam_debug_util.h"
Sagar Gore8d91a622017-02-23 14:57:18 -080024
25static struct cam_req_mgr_core_device *g_crm_core_dev;
26
Sagar Gored79f95e2017-03-14 18:32:17 -070027static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
28{
29 int32_t i = 0;
30 int rc = 0;
31 struct crm_task_payload *task_data = NULL;
32
33 task_data = kcalloc(
34 workq->task.num_task, sizeof(*task_data),
35 GFP_KERNEL);
36 if (!task_data) {
37 rc = -ENOMEM;
38 } else {
39 for (i = 0; i < workq->task.num_task; i++)
40 workq->task.pool[i].payload = &task_data[i];
41 }
42
43 return rc;
44}
Sagar Gore8d91a622017-02-23 14:57:18 -080045
46/**
Sagar Gored79f95e2017-03-14 18:32:17 -070047 * __cam_req_mgr_print_req_tbl()
Sagar Gore8d91a622017-02-23 14:57:18 -080048 *
Sagar Gored79f95e2017-03-14 18:32:17 -070049 * @brief : Print request table and input queue state for debugging
 50 * @req : request data pointer
Sagar Gore8d91a622017-02-23 14:57:18 -080051 *
Sagar Gored79f95e2017-03-14 18:32:17 -070052 * @return: 0 for success, negative for failure
53 *
Sagar Gore8d91a622017-02-23 14:57:18 -080054 */
Sagar Gored79f95e2017-03-14 18:32:17 -070055static int __cam_req_mgr_print_req_tbl(struct cam_req_mgr_req_data *req)
Sagar Gore8d91a622017-02-23 14:57:18 -080056{
Sagar Gored79f95e2017-03-14 18:32:17 -070057 int rc = 0;
58 int32_t i = 0;
59 struct cam_req_mgr_req_queue *in_q = req->in_q;
60 struct cam_req_mgr_req_tbl *req_tbl = req->l_tbl;
61
62 if (!in_q || !req_tbl) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070063 CAM_WARN(CAM_CRM, "NULL pointer %pK %pK", in_q, req_tbl);
Sagar Gored79f95e2017-03-14 18:32:17 -070064 return -EINVAL;
65 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070066 CAM_DBG(CAM_CRM, "in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
Sagar Gored79f95e2017-03-14 18:32:17 -070067 mutex_lock(&req->lock);
68 for (i = 0; i < in_q->num_slots; i++) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070069 CAM_DBG(CAM_CRM, "IN_Q %d: idx %d, req_id %lld", i,
Sagar Gored79f95e2017-03-14 18:32:17 -070070 in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
71 }
72
73 while (req_tbl != NULL) {
74 for (i = 0; i < req_tbl->num_slots; i++) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070075 CAM_DBG(CAM_CRM, "idx= %d, map= %x, state= %d",
Sagar Gored79f95e2017-03-14 18:32:17 -070076 req_tbl->slot[i].idx,
77 req_tbl->slot[i].req_ready_map,
78 req_tbl->slot[i].state);
79 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070080 CAM_DBG(CAM_CRM,
81 "TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
Sagar Gored79f95e2017-03-14 18:32:17 -070082 req_tbl->id, req_tbl->pd, req_tbl->dev_count,
83 req_tbl->dev_mask, req_tbl->skip_traverse,
84 req_tbl->num_slots);
85 req_tbl = req_tbl->next;
86 }
87 mutex_unlock(&req->lock);
88
89 return rc;
90}
91
92/**
93 * __cam_req_mgr_find_pd_tbl()
94 *
95 * @brief : Find pipeline delay based table pointer which matches delay
96 * @tbl : Pointer to list of request table
97 * @delay : Pipeline delay value to be searched for comparison
98 *
99 * @return : pointer to request table for matching pipeline delay table.
100 *
101 */
102static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
103 struct cam_req_mgr_req_tbl *tbl, int32_t delay)
104{
105 if (!tbl)
106 return NULL;
107
108 do {
109 if (delay != tbl->pd)
110 tbl = tbl->next;
111 else
112 return tbl;
113 } while (tbl != NULL);
114
115 return NULL;
116}
117
118/**
119 * __cam_req_mgr_inc_idx()
120 *
 121 * @brief : Increment val passed by step size and roll over after max_val
122 * @val : value to be incremented
123 * @step : amount/step by which val is incremented
124 * @max_val : max val after which idx will roll over
125 *
126 */
127static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
128{
129 *val = (*val + step) % max_val;
130}
131
132/**
133 * __cam_req_mgr_dec_idx()
134 *
 135 * @brief : Decrement val passed by step size and roll over below zero
 136 * @val : value to be decremented
 137 * @step : amount/step by which val is decremented
 138 * @max_val : value to which val rolls over when it goes below zero
139 *
140 */
141static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
142{
143 *val = *val - step;
144 if (*val < 0)
145 *val = max_val + (*val);
146}
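/*
 * Worked example (illustrative; a max_val of 8 slots is hypothetical):
 * __cam_req_mgr_inc_idx(&idx, 3, 8) with idx = 6 gives (6 + 3) % 8 = 1,
 * while __cam_req_mgr_dec_idx(&idx, 3, 8) with idx = 1 gives 1 - 3 = -2,
 * which rolls over to 8 + (-2) = 6, undoing the increment above.
 */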
147
148/**
149 * __cam_req_mgr_traverse()
150 *
 151 * @brief : Traverse through pd tables; this internally covers all linked
 152 * pd tables. Each pd table visited checks whether the idx passed
 153 * to it is in ready state, meaning all devices linked to that pd
 154 * table have the packet for this request id ready. It then calls
 155 * the next pd tbl with a new idx that takes into account the delta
156 * between current pd table and next one.
157 * @traverse_data: contains all the info to traverse through pd tables
158 *
159 * @return: 0 for success, negative for failure
160 *
161 */
162static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
163{
164 int rc = 0;
165 int32_t next_idx = traverse_data->idx;
166 int32_t curr_idx = traverse_data->idx;
167 struct cam_req_mgr_req_tbl *tbl;
168 struct cam_req_mgr_apply *apply_data;
169
170 if (!traverse_data->tbl || !traverse_data->apply_data) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700171 CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
Sagar Gored79f95e2017-03-14 18:32:17 -0700172 traverse_data->tbl, traverse_data->apply_data);
173 traverse_data->result = 0;
174 return -EINVAL;
175 }
176
177 tbl = traverse_data->tbl;
178 apply_data = traverse_data->apply_data;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700179 CAM_DBG(CAM_CRM, "Enter pd %d idx %d state %d skip %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700180 tbl->pd, curr_idx, tbl->slot[curr_idx].state,
181 tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
182
183 /* Check if req is ready or in skip mode or pd tbl is in skip mode */
184 if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
185 traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
186 tbl->skip_traverse > 0) {
187 if (tbl->next) {
188 __cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
189 tbl->num_slots);
190 traverse_data->idx = next_idx;
191 traverse_data->tbl = tbl->next;
192 rc = __cam_req_mgr_traverse(traverse_data);
193 }
194 if (rc >= 0) {
195 SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
196 apply_data[tbl->pd].pd = tbl->pd;
197 apply_data[tbl->pd].req_id =
198 CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
199 apply_data[tbl->pd].idx = curr_idx;
200
 201 /* If traverse is successful, decrement traverse skip */
202 if (tbl->skip_traverse > 0) {
203 apply_data[tbl->pd].req_id = -1;
204 tbl->skip_traverse--;
205 }
206 } else {
207 /* linked pd table is not ready for this traverse yet */
208 return rc;
209 }
210 } else {
211 /* This pd table is not ready to proceed with asked idx */
212 SET_FAILURE_BIT(traverse_data->result, tbl->pd);
213 return -EAGAIN;
214 }
215 return 0;
216}
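/*
 * Worked example (illustrative; the two-table link and index values are
 * hypothetical): a link with a pd-2 table followed by a pd-0 table has
 * pd_delta 2 on the first table. Traversing with idx 5 checks the pd-2
 * slot 5; if it is ready, the call recurses into the pd-0 table with
 * idx 5 - 2 = 3. If that slot is ready too, apply_data[2] holds the
 * request at slot 5, apply_data[0] the request at slot 3, and the
 * success bits for pd 2 and pd 0 are set in traverse_data->result.
 */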
217
218/**
219 * __cam_req_mgr_in_q_skip_idx()
220 *
 221 * @brief : Mark a slot in the input queue to be skipped during traverse
222 * @in_q : input queue pointer
223 * @idx : Sets skip_idx bit of the particular slot to true so when traverse
224 * happens for this idx, no req will be submitted for devices
225 * handling this idx.
226 *
227 */
228static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
229 int32_t idx)
230{
231 in_q->slot[idx].req_id = -1;
232 in_q->slot[idx].skip_idx = 1;
233 in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700234 CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
Sagar Gored79f95e2017-03-14 18:32:17 -0700235}
236
237/**
238 * __cam_req_mgr_tbl_set_id()
239 *
240 * @brief : Set unique id to table
241 * @tbl : pipeline based table which requires new id
 242 * @req : pointer to request data which contains num_tables counter
243 *
244 */
245static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
246 struct cam_req_mgr_req_data *req)
247{
248 if (!tbl)
249 return;
250 do {
251 tbl->id = req->num_tbl++;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700252 CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700253 tbl->id, tbl->pd, tbl->skip_traverse,
254 tbl->pd_delta);
255 tbl = tbl->next;
256 } while (tbl != NULL);
257}
258
259/**
260 * __cam_req_mgr_tbl_set_all_skip_cnt()
261 *
262 * @brief : Each pd table sets skip value based on delta between itself and
263 * max pd value. During initial streamon or bubble case this is
 264 * used. That way each pd table skips the required number of
 265 * traversals and aligns itself with the devices connected to req mgr.
266 * @l_tbl : iterates through list of pd tables and sets skip traverse
267 *
268 */
269static void __cam_req_mgr_tbl_set_all_skip_cnt(
270 struct cam_req_mgr_req_tbl **l_tbl)
271{
272 struct cam_req_mgr_req_tbl *tbl = *l_tbl;
273 int32_t max_pd;
274
275 if (!tbl)
276 return;
277
278 max_pd = tbl->pd;
279 do {
280 tbl->skip_traverse = max_pd - tbl->pd;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700281 CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700282 tbl->id, tbl->pd, tbl->skip_traverse,
283 tbl->pd_delta);
284 tbl = tbl->next;
285 } while (tbl != NULL);
286}
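/*
 * Worked example (illustrative; a three-table link is hypothetical): if
 * the list head has pd 2, max_pd is 2 and skip_traverse becomes 0 for
 * the pd-2 table, 1 for a pd-1 table and 2 for a pd-0 table, so the
 * lower-delay tables sit out the first traversals until all devices
 * are aligned.
 */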
287
288/**
289 * __cam_req_mgr_reset_req_slot()
290 *
291 * @brief : reset specified idx/slot in input queue as well as all pd tables
292 * @link : link pointer
293 * @idx : slot index which will be reset
294 *
295 */
296static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
297 int32_t idx)
298{
299 struct cam_req_mgr_slot *slot;
300 struct cam_req_mgr_req_tbl *tbl = link->req.l_tbl;
301 struct cam_req_mgr_req_queue *in_q = link->req.in_q;
302
303 slot = &in_q->slot[idx];
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700304 CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);
Sagar Gored79f95e2017-03-14 18:32:17 -0700305
306 /* Check if CSL has already pushed new request*/
307 if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
308 return;
309
310 /* Reset input queue slot */
311 slot->req_id = -1;
312 slot->skip_idx = 0;
313 slot->recover = 0;
314 slot->status = CRM_SLOT_STATUS_NO_REQ;
315
316 /* Reset all pd table slot */
317 while (tbl != NULL) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700318 CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700319 tbl->pd, idx, tbl->slot[idx].state);
320 tbl->slot[idx].req_ready_map = 0;
321 tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
322 tbl = tbl->next;
323 }
324}
325
326/**
327 * __cam_req_mgr_check_next_req_slot()
328 *
329 * @brief : While streaming if input queue does not contain any pending
330 * request, req mgr still needs to submit pending request ids to
331 * devices with lower pipeline delay value.
 332 * @in_q : Pointer to input queue that req mgr will peek into
333 *
334 */
335static void __cam_req_mgr_check_next_req_slot(
336 struct cam_req_mgr_req_queue *in_q)
337{
338 int32_t idx = in_q->rd_idx;
339 struct cam_req_mgr_slot *slot;
340
341 __cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
342 slot = &in_q->slot[idx];
343
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700344 CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
Sagar Gored79f95e2017-03-14 18:32:17 -0700345
346 /* Check if there is new req from CSL, if not complete req */
347 if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
348 __cam_req_mgr_in_q_skip_idx(in_q, idx);
349 if (in_q->wr_idx != idx)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700350 CAM_WARN(CAM_CRM,
351 "CHECK here wr %d, rd %d", in_q->wr_idx, idx);
Sagar Gored79f95e2017-03-14 18:32:17 -0700352 __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
353 }
354}
355
356/**
357 * __cam_req_mgr_send_req()
358 *
359 * @brief : send request id to be applied to each device connected on link
360 * @link : pointer to link whose input queue and req tbl are
361 * traversed through
362 * @in_q : pointer to input request queue
363 *
364 * @return : 0 for success, negative for failure
365 *
366 */
367static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
Junzhe Zou2df84502017-05-26 13:20:23 -0700368 struct cam_req_mgr_req_queue *in_q, uint32_t trigger)
Sagar Gored79f95e2017-03-14 18:32:17 -0700369{
370 int rc = 0, pd, i, idx;
371 struct cam_req_mgr_connected_device *dev = NULL;
372 struct cam_req_mgr_apply_request apply_req;
373 struct cam_req_mgr_link_evt_data evt_data;
374
375 apply_req.link_hdl = link->link_hdl;
376 apply_req.report_if_bubble = 0;
377
378 for (i = 0; i < link->num_devs; i++) {
379 dev = &link->l_dev[i];
380 if (dev) {
381 pd = dev->dev_info.p_delay;
382 if (pd >= CAM_PIPELINE_DELAY_MAX) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700383 CAM_WARN(CAM_CRM, "pd %d greater than max",
Sagar Gored79f95e2017-03-14 18:32:17 -0700384 pd);
385 continue;
386 }
387 if (link->req.apply_data[pd].skip_idx ||
388 link->req.apply_data[pd].req_id < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700389 CAM_DBG(CAM_CRM, "skip %d req_id %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -0700390 link->req.apply_data[pd].skip_idx,
391 link->req.apply_data[pd].req_id);
392 continue;
393 }
Junzhe Zou2df84502017-05-26 13:20:23 -0700394 if (!(dev->dev_info.trigger & trigger))
395 continue;
396
Sagar Gored79f95e2017-03-14 18:32:17 -0700397 apply_req.dev_hdl = dev->dev_hdl;
398 apply_req.request_id =
399 link->req.apply_data[pd].req_id;
400 idx = link->req.apply_data[pd].idx;
401 apply_req.report_if_bubble =
402 in_q->slot[idx].recover;
Gregory Bergschneider60679932017-07-19 15:27:16 -0600403
404 trace_cam_req_mgr_apply_request(link, &apply_req, dev);
405
Junzhe Zou2df84502017-05-26 13:20:23 -0700406 apply_req.trigger_point = trigger;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700407 CAM_DBG(CAM_CRM, "SEND: pd %d req_id %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -0700408 pd, apply_req.request_id);
409 if (dev->ops && dev->ops->apply_req) {
410 rc = dev->ops->apply_req(&apply_req);
411 if (rc < 0)
412 break;
413 }
414 }
415 }
416 if (rc < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700417 CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -0700418 dev->dev_info.p_delay, apply_req.request_id);
419 /* Apply req failed notify already applied devs */
420 for (; i >= 0; i--) {
421 dev = &link->l_dev[i];
422 evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
423 evt_data.link_hdl = link->link_hdl;
424 evt_data.req_id = apply_req.request_id;
425 evt_data.u.error = CRM_KMD_ERR_BUBBLE;
426 if (dev->ops && dev->ops->process_evt)
427 dev->ops->process_evt(&evt_data);
428 }
429 }
430 return rc;
431}
432
433/**
434 * __cam_req_mgr_check_link_is_ready()
435 *
436 * @brief : traverse through all request tables and see if all devices are
437 * ready to apply request settings.
438 * @link : pointer to link whose input queue and req tbl are
439 * traversed through
440 * @idx : index within input request queue
441 *
442 * @return : 0 for success, negative for failure
443 *
444 */
445static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
446 int32_t idx)
447{
448 int rc;
449 struct cam_req_mgr_traverse traverse_data;
450 struct cam_req_mgr_req_queue *in_q;
451 struct cam_req_mgr_apply *apply_data;
452
453 in_q = link->req.in_q;
454
455 apply_data = link->req.apply_data;
456 memset(apply_data, 0,
457 sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
458
459 traverse_data.apply_data = apply_data;
460 traverse_data.idx = idx;
461 traverse_data.tbl = link->req.l_tbl;
462 traverse_data.in_q = in_q;
463 traverse_data.result = 0;
464 /*
465 * Traverse through all pd tables, if result is success,
466 * apply the settings
467 */
468
469 rc = __cam_req_mgr_traverse(&traverse_data);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700470 CAM_DBG(CAM_CRM, "SOF: idx %d result %x pd_mask %x rc %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700471 idx, traverse_data.result, link->pd_mask, rc);
472
473 if (!rc && traverse_data.result == link->pd_mask) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700474 CAM_DBG(CAM_CRM,
475 "APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
Sagar Gored79f95e2017-03-14 18:32:17 -0700476 link->link_hdl, idx,
477 apply_data[2].req_id, apply_data[1].req_id,
478 apply_data[0].req_id);
479 } else
480 rc = -EAGAIN;
481
482 return rc;
483}
484
485/**
486 * __cam_req_mgr_process_req()
487 *
 488 * @brief : processes read index in request queue and traverses through tables
489 * @link : pointer to link whose input queue and req tbl are
490 * traversed through
491 *
492 * @return : 0 for success, negative for failure
493 *
494 */
Junzhe Zou2df84502017-05-26 13:20:23 -0700495static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
496 uint32_t trigger)
Sagar Gored79f95e2017-03-14 18:32:17 -0700497{
498 int rc = 0, idx;
499 struct cam_req_mgr_slot *slot = NULL;
500 struct cam_req_mgr_req_queue *in_q;
501 struct cam_req_mgr_core_session *session;
502
503 in_q = link->req.in_q;
504 session = (struct cam_req_mgr_core_session *)link->parent;
505
506 /*
Junzhe Zou2df84502017-05-26 13:20:23 -0700507 * Check if new read index,
Sagar Gored79f95e2017-03-14 18:32:17 -0700508 * - if in pending state, traverse again to complete
509 * transaction of this read index.
 510 * - if in applied_state, something is wrong.
511 * - if in no_req state, no new req
512 */
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700513 CAM_DBG(CAM_CRM, "idx %d req_status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700514 in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
515
516 slot = &in_q->slot[in_q->rd_idx];
517 if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700518 CAM_DBG(CAM_CRM, "No Pending req");
Sagar Gored79f95e2017-03-14 18:32:17 -0700519 return 0;
520 }
521
Junzhe Zou2df84502017-05-26 13:20:23 -0700522 if (trigger != CAM_TRIGGER_POINT_SOF &&
523 trigger != CAM_TRIGGER_POINT_EOF)
524 return rc;
Sagar Gored79f95e2017-03-14 18:32:17 -0700525
Junzhe Zou2df84502017-05-26 13:20:23 -0700526 if (trigger == CAM_TRIGGER_POINT_SOF) {
Junzhe Zou3f77d832017-08-25 14:55:23 -0700527 if (link->trigger_mask) {
Junzhe Zou2df84502017-05-26 13:20:23 -0700528 CAM_ERR(CAM_CRM, "Applying for last EOF fails");
529 return -EINVAL;
530 }
531 rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
532 if (rc < 0) {
533
534 /* If traverse result is not success, then some devices
535 * are not ready with packet for the asked request id,
536 * hence try again in next sof
537 */
538 slot->status = CRM_SLOT_STATUS_REQ_PENDING;
Sagar Gored79f95e2017-03-14 18:32:17 -0700539 if (link->state == CAM_CRM_LINK_STATE_ERR) {
Junzhe Zou2df84502017-05-26 13:20:23 -0700540 /*
541 * During error recovery all tables should be
542 * ready, don't expect to enter here.
543 * @TODO: gracefully handle if recovery fails.
544 */
545 CAM_ERR(CAM_CRM,
546 "FATAL recovery cant finish idx %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700547 in_q->rd_idx,
548 in_q->slot[in_q->rd_idx].status);
Junzhe Zou2df84502017-05-26 13:20:23 -0700549 rc = -EPERM;
Sagar Gored79f95e2017-03-14 18:32:17 -0700550 }
Junzhe Zou2df84502017-05-26 13:20:23 -0700551 return rc;
552 }
553 }
554 if (trigger == CAM_TRIGGER_POINT_EOF &&
555 (!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
556 CAM_ERR(CAM_CRM, "Applying for last SOF fails");
557 return -EINVAL;
558 }
Sagar Gored79f95e2017-03-14 18:32:17 -0700559
Junzhe Zou2df84502017-05-26 13:20:23 -0700560 rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger);
561 if (rc < 0) {
562 /* Apply req failed retry at next sof */
563 slot->status = CRM_SLOT_STATUS_REQ_PENDING;
564 } else {
565 link->trigger_mask |= trigger;
566
567 if (link->state == CAM_CRM_LINK_STATE_ERR) {
568 CAM_WARN(CAM_CRM, "Err recovery done idx %d",
569 in_q->rd_idx);
570 mutex_lock(&link->lock);
571 link->state = CAM_CRM_LINK_STATE_READY;
572 mutex_unlock(&link->lock);
573 }
574 if (link->trigger_mask == link->subscribe_event) {
575 slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
576 link->trigger_mask = 0;
577 CAM_DBG(CAM_CRM, "req is applied\n");
Sagar Gored79f95e2017-03-14 18:32:17 -0700578 __cam_req_mgr_check_next_req_slot(in_q);
579
Sagar Gored79f95e2017-03-14 18:32:17 -0700580 idx = in_q->rd_idx;
Junzhe Zou2df84502017-05-26 13:20:23 -0700581 __cam_req_mgr_dec_idx(
582 &idx, link->max_delay + 1,
Sagar Gored79f95e2017-03-14 18:32:17 -0700583 in_q->num_slots);
584 __cam_req_mgr_reset_req_slot(link, idx);
585 }
Sagar Gored79f95e2017-03-14 18:32:17 -0700586 }
587
588 return rc;
589}
590
591/**
592 * __cam_req_mgr_add_tbl_to_link()
593 *
 594 * @brief : Add table to list under link, sorted in decreasing order of pd
595 * @l_tbl : list of pipeline delay tables.
Junzhe Zou2df84502017-05-26 13:20:23 -0700596 * @new_tbl : new tbl which will be appended to above list as per its pd value
Sagar Gored79f95e2017-03-14 18:32:17 -0700597 *
598 */
599static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
600 struct cam_req_mgr_req_tbl *new_tbl)
601{
602 struct cam_req_mgr_req_tbl *tbl;
603
604 if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
605 new_tbl->next = *l_tbl;
606 if (*l_tbl) {
607 new_tbl->pd_delta =
608 new_tbl->pd - (*l_tbl)->pd;
609 }
610 *l_tbl = new_tbl;
611 } else {
612 tbl = *l_tbl;
613
614 /* Reach existing tbl which has less pd value */
615 while (tbl->next != NULL &&
616 new_tbl->pd < tbl->next->pd) {
617 tbl = tbl->next;
618 }
619 if (tbl->next != NULL) {
620 new_tbl->pd_delta =
621 new_tbl->pd - tbl->next->pd;
622 } else {
623 /* This is last table in linked list*/
624 new_tbl->pd_delta = 0;
625 }
626 new_tbl->next = tbl->next;
627 tbl->next = new_tbl;
628 tbl->pd_delta = tbl->pd - new_tbl->pd;
629 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700630 CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
Sagar Gored79f95e2017-03-14 18:32:17 -0700631 new_tbl->pd_delta);
632}
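/*
 * Worked example (illustrative pd values): inserting tables with pd 1,
 * pd 2 and pd 0, in that order, yields the list 2 -> 1 -> 0 (descending
 * pd). pd_delta is the gap to the next table: 1 for the pd-2 table,
 * 1 for the pd-1 table and 0 for the last (pd-0) table.
 */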
633
634/**
635 * __cam_req_mgr_create_pd_tbl()
636 *
637 * @brief : Creates new request table for new delay value
638 * @delay : New pd table allocated will have this delay value
639 *
640 * @return : pointer to newly allocated table, NULL for failure
641 *
642 */
643static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
644{
645 struct cam_req_mgr_req_tbl *tbl =
646 kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
647 if (tbl != NULL) {
648 tbl->num_slots = MAX_REQ_SLOTS;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700649 CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots);
Sagar Gored79f95e2017-03-14 18:32:17 -0700650 }
651
652 return tbl;
653}
654
655/**
656 * __cam_req_mgr_destroy_all_tbl()
657 *
658 * @brief : This func will destroy all pipeline delay based req table structs
 659 * @l_tbl : pointer to first table in list, which has the max pd.
660 *
661 */
662static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
663{
664 struct cam_req_mgr_req_tbl *tbl = *l_tbl, *temp;
665
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700666 CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl);
Sagar Gored79f95e2017-03-14 18:32:17 -0700667 while (tbl != NULL) {
668 temp = tbl->next;
669 kfree(tbl);
670 tbl = temp;
671 }
672 *l_tbl = NULL;
673}
674
675/**
676 * __cam_req_mgr_find_slot_for_req()
677 *
678 * @brief : Find idx from input queue at which req id is enqueued
679 * @in_q : input request queue pointer
680 * @req_id : request id which needs to be searched in input queue
681 *
682 * @return : slot index where passed request id is stored, -1 for failure
683 *
684 */
685static int32_t __cam_req_mgr_find_slot_for_req(
686 struct cam_req_mgr_req_queue *in_q, int64_t req_id)
687{
688 int32_t idx, i;
689 struct cam_req_mgr_slot *slot;
690
691 idx = in_q->wr_idx;
692 for (i = 0; i < in_q->num_slots; i++) {
693 slot = &in_q->slot[idx];
694 if (slot->req_id == req_id) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700695 CAM_DBG(CAM_CRM, "req %lld found at %d %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700696 req_id, idx, slot->idx,
697 slot->status);
698 break;
699 }
700 __cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
701 }
702 if (i >= in_q->num_slots)
703 idx = -1;
704
705 return idx;
706}
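/*
 * Design note (from the loop above): the search starts at wr_idx and
 * walks backwards via __cam_req_mgr_dec_idx, so recently scheduled
 * request ids are matched first; a full lap without a match yields -1.
 */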
707
708/**
709 * __cam_req_mgr_setup_in_q()
710 *
 711 * @brief : Initialize input request queue data
712 * @req : request data pointer
713 *
714 * @return: 0 for success, negative for failure
715 *
716 */
717static int __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
718{
719 int i;
720 struct cam_req_mgr_req_queue *in_q = req->in_q;
721
722 if (!in_q) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700723 CAM_ERR(CAM_CRM, "NULL in_q");
Sagar Gored79f95e2017-03-14 18:32:17 -0700724 return -EINVAL;
725 }
726
727 mutex_lock(&req->lock);
728 in_q->num_slots = MAX_REQ_SLOTS;
729
730 for (i = 0; i < in_q->num_slots; i++) {
731 in_q->slot[i].idx = i;
732 in_q->slot[i].req_id = -1;
733 in_q->slot[i].skip_idx = 0;
734 in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ;
735 }
736
737 in_q->wr_idx = 0;
738 in_q->rd_idx = 0;
739 mutex_unlock(&req->lock);
740
741 return 0;
742}
743
744/**
 745 * __cam_req_mgr_reset_in_q()
 746 *
 747 * @brief : Reset input request queue data
 748 * @req : request data pointer
749 *
750 * @return: 0 for success, negative for failure
751 *
752 */
753static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
754{
755 struct cam_req_mgr_req_queue *in_q = req->in_q;
756
757 if (!in_q) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700758 CAM_ERR(CAM_CRM, "NULL in_q");
Sagar Gored79f95e2017-03-14 18:32:17 -0700759 return -EINVAL;
760 }
761
762 mutex_lock(&req->lock);
763 memset(in_q->slot, 0,
764 sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
765 in_q->num_slots = 0;
766
767 in_q->wr_idx = 0;
768 in_q->rd_idx = 0;
769 mutex_unlock(&req->lock);
770
771 return 0;
772}
773
774/**
775 * __cam_req_mgr_sof_freeze()
776 *
777 * @brief : Apoptosis - Handles case when connected devices are not responding
778 * @data : timer pointer
779 *
780 */
781static void __cam_req_mgr_sof_freeze(unsigned long data)
782{
783 struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
Sagar Gore8d91a622017-02-23 14:57:18 -0800784 struct cam_req_mgr_core_link *link = NULL;
785
Sagar Gored79f95e2017-03-14 18:32:17 -0700786 if (!timer) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700787 CAM_ERR(CAM_CRM, "NULL timer");
Sagar Gored79f95e2017-03-14 18:32:17 -0700788 return;
789 }
790 link = (struct cam_req_mgr_core_link *)timer->parent;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700791 CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -0700792}
793
794/**
795 * __cam_req_mgr_create_subdevs()
796 *
797 * @brief : Create new crm subdev to link with realtime devices
798 * @l_dev : list of subdevs internal to crm
799 * @num_dev : num of subdevs to be created for link
800 *
 801 * @return : 0 for success, negative for failure
802 */
803static int __cam_req_mgr_create_subdevs(
804 struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
805{
806 int rc = 0;
807 *l_dev = (struct cam_req_mgr_connected_device *)
808 kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
809 GFP_KERNEL);
810 if (!*l_dev)
811 rc = -ENOMEM;
812
813 return rc;
814}
815
816/**
817 * __cam_req_mgr_destroy_subdev()
818 *
819 * @brief : Cleans up the subdevs allocated by crm for link
820 * @l_device : pointer to list of subdevs crm created
821 *
822 */
823static void __cam_req_mgr_destroy_subdev(
824 struct cam_req_mgr_connected_device *l_device)
825{
826 kfree(l_device);
827 l_device = NULL;
828}
829
830/**
831 * __cam_req_mgr_destroy_link_info()
832 *
833 * @brief : Cleans up the mem allocated while linking
834 * @link : pointer to link, mem associated with this link is freed
835 *
836 */
837static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
838{
839 int32_t i = 0;
840 struct cam_req_mgr_connected_device *dev;
841 struct cam_req_mgr_core_dev_link_setup link_data;
842
843 mutex_lock(&link->lock);
844
845 link_data.link_enable = 0;
846 link_data.link_hdl = link->link_hdl;
847 link_data.crm_cb = NULL;
Junzhe Zou2df84502017-05-26 13:20:23 -0700848 link_data.subscribe_event = 0;
Sagar Gored79f95e2017-03-14 18:32:17 -0700849
850 /* Using device ops unlink devices */
851 for (i = 0; i < link->num_devs; i++) {
852 dev = &link->l_dev[i];
853 if (dev != NULL) {
Soundrapandian Jeyaprakash74946262017-08-11 18:23:47 -0700854 link_data.dev_hdl = dev->dev_hdl;
Sagar Gored79f95e2017-03-14 18:32:17 -0700855 if (dev->ops && dev->ops->link_setup)
856 dev->ops->link_setup(&link_data);
857 dev->dev_hdl = 0;
858 dev->parent = NULL;
859 dev->ops = NULL;
860 }
861 }
862 __cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
863 __cam_req_mgr_reset_in_q(&link->req);
864 link->req.num_tbl = 0;
865 mutex_destroy(&link->req.lock);
866
867 link->pd_mask = 0;
868 link->num_devs = 0;
869 link->max_delay = 0;
870
871 mutex_unlock(&link->lock);
872}
873
874/**
875 * __cam_req_mgr_reserve_link()
876 *
877 * @brief: Reserves one link data struct within session
878 * @session: session identifier
879 *
880 * @return: pointer to link reserved
881 *
882 */
883static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
884 struct cam_req_mgr_core_session *session)
885{
886 struct cam_req_mgr_core_link *link;
887 struct cam_req_mgr_req_queue *in_q;
888
889 if (!session || !g_crm_core_dev) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700890 CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
Sagar Gore8d91a622017-02-23 14:57:18 -0800891 return NULL;
892 }
893
Sagar Gored79f95e2017-03-14 18:32:17 -0700894 if (session->num_links >= MAX_LINKS_PER_SESSION) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700895 CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700896 session->num_links, MAX_LINKS_PER_SESSION);
897 return NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -0800898 }
Sagar Gored79f95e2017-03-14 18:32:17 -0700899
900 link = (struct cam_req_mgr_core_link *)
901 kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
902 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700903 CAM_ERR(CAM_CRM, "failed to create link, no mem");
Sagar Gored79f95e2017-03-14 18:32:17 -0700904 return NULL;
905 }
906 in_q = &session->in_q;
907 mutex_init(&link->lock);
908
909 mutex_lock(&link->lock);
910 link->state = CAM_CRM_LINK_STATE_AVAILABLE;
911 link->num_devs = 0;
912 link->max_delay = 0;
913 memset(in_q->slot, 0,
914 sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
915 link->req.in_q = in_q;
916 in_q->num_slots = 0;
917 link->state = CAM_CRM_LINK_STATE_IDLE;
918 link->parent = (void *)session;
919 mutex_unlock(&link->lock);
920
921 mutex_lock(&session->lock);
922 session->links[session->num_links] = link;
923 session->num_links++;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700924 CAM_DBG(CAM_CRM, "Active session links (%d)",
Sagar Gored79f95e2017-03-14 18:32:17 -0700925 session->num_links);
926 mutex_unlock(&session->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -0800927
928 return link;
929}
930
931/**
Sagar Gored79f95e2017-03-14 18:32:17 -0700932 * __cam_req_mgr_unreserve_link()
 933 *
 934 * @brief : Removes one link data struct from the session and frees it
935 * @session: session identifier
936 * @link : link identifier
937 *
938 */
939static void __cam_req_mgr_unreserve_link(
940 struct cam_req_mgr_core_session *session,
941 struct cam_req_mgr_core_link **link)
942{
943 int32_t i = 0;
944
945 if (!session || !*link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700946 CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
Sagar Gored79f95e2017-03-14 18:32:17 -0700947 session, *link);
948 return;
949 }
950
951 mutex_lock(&session->lock);
952 if (!session->num_links)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700953 CAM_WARN(CAM_CRM, "No active link or invalid state %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700954 session->num_links);
955 else {
956 for (i = 0; i < session->num_links; i++) {
957 if (session->links[i] == *link)
958 session->links[i] = NULL;
959 }
960 session->num_links--;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700961 CAM_DBG(CAM_CRM, "Active session links (%d)",
Sagar Gored79f95e2017-03-14 18:32:17 -0700962 session->num_links);
963 }
964 kfree(*link);
965 *link = NULL;
966 mutex_unlock(&session->lock);
967
968}
969
970/* Workqueue context processing section */
971
972/**
973 * cam_req_mgr_process_send_req()
974 *
 975 * @brief: This runs in workqueue thread context. Call core funcs to send
976 * apply request id to drivers.
977 * @priv : link information.
978 * @data : contains information about frame_id, link etc.
979 *
980 * @return: 0 on success.
981 */
982int cam_req_mgr_process_send_req(void *priv, void *data)
983{
984 int rc = 0;
985 struct cam_req_mgr_core_link *link = NULL;
986 struct cam_req_mgr_send_request *send_req = NULL;
987 struct cam_req_mgr_req_queue *in_q = NULL;
988
989 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700990 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -0700991 rc = -EINVAL;
992 goto end;
993 }
994 link = (struct cam_req_mgr_core_link *)priv;
995 send_req = (struct cam_req_mgr_send_request *)data;
996 in_q = send_req->in_q;
997
Junzhe Zou2df84502017-05-26 13:20:23 -0700998 rc = __cam_req_mgr_send_req(link, in_q, CAM_TRIGGER_POINT_SOF);
Sagar Gored79f95e2017-03-14 18:32:17 -0700999end:
1000 return rc;
1001}
1002
1003/**
1004 * cam_req_mgr_process_flush_req()
1005 *
 1006 * @brief: This runs in workqueue thread context. Call core funcs to check
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001007 * which requests need to be removed/cancelled.
Sagar Gored79f95e2017-03-14 18:32:17 -07001008 * @priv : link information.
1009 * @data : contains information about frame_id, link etc.
1010 *
1011 * @return: 0 on success.
1012 */
1013int cam_req_mgr_process_flush_req(void *priv, void *data)
1014{
1015 int rc = 0, i = 0, idx = -1;
1016 struct cam_req_mgr_flush_info *flush_info = NULL;
1017 struct cam_req_mgr_core_link *link = NULL;
1018 struct cam_req_mgr_req_queue *in_q = NULL;
1019 struct cam_req_mgr_slot *slot = NULL;
1020 struct cam_req_mgr_connected_device *device = NULL;
1021 struct cam_req_mgr_flush_request flush_req;
1022 struct crm_task_payload *task_data = NULL;
1023
1024 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001025 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001026 rc = -EINVAL;
1027 goto end;
1028 }
1029 link = (struct cam_req_mgr_core_link *)priv;
1030 task_data = (struct crm_task_payload *)data;
1031 flush_info = (struct cam_req_mgr_flush_info *)&task_data->u;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001032 CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld type %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001033 flush_info->link_hdl,
1034 flush_info->req_id,
1035 flush_info->flush_type);
1036
1037 in_q = link->req.in_q;
1038
Gregory Bergschneider60679932017-07-19 15:27:16 -06001039 trace_cam_flush_req(link, flush_info);
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001040
Sagar Gored79f95e2017-03-14 18:32:17 -07001041 mutex_lock(&link->req.lock);
1042 if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
1043 for (i = 0; i < in_q->num_slots; i++) {
1044 slot = &in_q->slot[i];
1045 slot->req_id = -1;
1046 slot->skip_idx = 1;
1047 slot->status = CRM_SLOT_STATUS_NO_REQ;
1048 }
1049 in_q->wr_idx = 0;
1050 in_q->rd_idx = 0;
1051 } else if (flush_info->flush_type ==
1052 CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
1053 idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
1054 if (idx < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001055 CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
Sagar Gored79f95e2017-03-14 18:32:17 -07001056 flush_info->req_id);
1057 } else {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001058 CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001059 flush_info->req_id, idx);
1060 slot = &in_q->slot[idx];
1061 if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
1062 slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001063 CAM_WARN(CAM_CRM,
1064 "req_id %lld can not be cancelled",
Sagar Gored79f95e2017-03-14 18:32:17 -07001065 flush_info->req_id);
1066 mutex_unlock(&link->req.lock);
1067 return -EINVAL;
1068 }
1069 __cam_req_mgr_in_q_skip_idx(in_q, idx);
1070 }
1071 }
1072
1073 for (i = 0; i < link->num_devs; i++) {
1074 device = &link->l_dev[i];
1075 flush_req.link_hdl = flush_info->link_hdl;
1076 flush_req.dev_hdl = device->dev_hdl;
1077 flush_req.req_id = flush_info->req_id;
1078 flush_req.type = flush_info->flush_type;
1079 /* @TODO: error return handling from drivers */
1080 if (device->ops && device->ops->flush_req)
1081 rc = device->ops->flush_req(&flush_req);
1082 }
1083 mutex_unlock(&link->req.lock);
1084
1085 complete(&link->workq_comp);
1086end:
1087 return rc;
1088}
1089
1090/**
1091 * cam_req_mgr_process_sched_req()
1092 *
 1093 * @brief: This runs in workqueue thread context. Call core funcs to check
 1094 * which pending requests can be processed.
1095 * @priv : link information.
1096 * @data : contains information about frame_id, link etc.
1097 *
1098 * @return: 0 on success.
1099 */
1100int cam_req_mgr_process_sched_req(void *priv, void *data)
1101{
1102 int rc = 0;
1103 struct cam_req_mgr_sched_request *sched_req = NULL;
1104 struct cam_req_mgr_core_link *link = NULL;
1105 struct cam_req_mgr_req_queue *in_q = NULL;
1106 struct cam_req_mgr_slot *slot = NULL;
1107 struct crm_task_payload *task_data = NULL;
1108
1109 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001110 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001111 rc = -EINVAL;
1112 goto end;
1113 }
1114 link = (struct cam_req_mgr_core_link *)priv;
1115 task_data = (struct crm_task_payload *)data;
1116 sched_req = (struct cam_req_mgr_sched_request *)&task_data->u;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001117 CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -07001118 sched_req->link_hdl,
1119 sched_req->req_id);
1120
1121 in_q = link->req.in_q;
1122
1123 mutex_lock(&link->req.lock);
1124 slot = &in_q->slot[in_q->wr_idx];
1125
1126 if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
Sagar Goreb56c81e2017-05-08 17:15:47 -07001127 slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001128 CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
Sagar Goreb56c81e2017-05-08 17:15:47 -07001129
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001130 CAM_DBG(CAM_CRM, "sched_req %lld at slot %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001131 sched_req->req_id, in_q->wr_idx);
1132
1133 slot->status = CRM_SLOT_STATUS_REQ_ADDED;
1134 slot->req_id = sched_req->req_id;
1135 slot->skip_idx = 0;
1136 slot->recover = sched_req->bubble_enable;
1137 __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
1138 mutex_unlock(&link->req.lock);
1139
Sagar Gored79f95e2017-03-14 18:32:17 -07001140end:
1141 return rc;
1142}
1143
1144/**
1145 * cam_req_mgr_process_add_req()
1146 *
 1147 * @brief: This runs in workqueue thread context. Call core funcs to check
 1148 * which pending requests can be processed.
1149 * @priv : link information.
1150 * @data : contains information about frame_id, link etc.
1151 *
1152 * @return: 0 on success.
1153 */
1154int cam_req_mgr_process_add_req(void *priv, void *data)
1155{
1156 int rc = 0, i = 0, idx;
1157 struct cam_req_mgr_add_request *add_req = NULL;
1158 struct cam_req_mgr_core_link *link = NULL;
1159 struct cam_req_mgr_connected_device *device = NULL;
1160 struct cam_req_mgr_req_tbl *tbl = NULL;
1161 struct cam_req_mgr_tbl_slot *slot = NULL;
1162 struct crm_task_payload *task_data = NULL;
1163
1164 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001165 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001166 rc = -EINVAL;
1167 goto end;
1168 }
1169
1170 link = (struct cam_req_mgr_core_link *)priv;
1171 task_data = (struct crm_task_payload *)data;
1172 add_req = (struct cam_req_mgr_add_request *)&task_data->u;
1173
1174 for (i = 0; i < link->num_devs; i++) {
1175 device = &link->l_dev[i];
1176 if (device->dev_hdl == add_req->dev_hdl) {
1177 tbl = device->pd_tbl;
1178 break;
1179 }
1180 }
1181 if (!tbl) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001182 CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07001183 add_req->dev_hdl,
1184 link->l_dev[0].dev_hdl,
1185 link->l_dev[1].dev_hdl);
1186 rc = -EINVAL;
1187 goto end;
1188 }
1189 /*
1190 * Go through request table and add
1191 * request id to proper table
 1192 * 1. find req slot in in_q matching req_id sent by dev
1193 * 2. goto table of this device based on p_delay
1194 * 3. mark req_ready_map with this dev_bit.
1195 */
1196
1197 mutex_lock(&link->req.lock);
1198 idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
1199 if (idx < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001200 CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001201 rc = -EBADSLT;
1202 mutex_unlock(&link->req.lock);
1203 goto end;
1204 }
1205 slot = &tbl->slot[idx];
1206 if (slot->state != CRM_REQ_STATE_PENDING &&
1207 slot->state != CRM_REQ_STATE_EMPTY) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001208 CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07001209 slot->state, idx, slot->req_ready_map);
1210 }
1211
1212 slot->state = CRM_REQ_STATE_PENDING;
1213 slot->req_ready_map |= (1 << device->dev_bit);
1214
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001215 CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07001216 idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
1217 slot->req_ready_map);
1218
Gregory Bergschneider60679932017-07-19 15:27:16 -06001219 trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
1220
Sagar Gored79f95e2017-03-14 18:32:17 -07001221 if (slot->req_ready_map == tbl->dev_mask) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001222 CAM_DBG(CAM_CRM, "idx %d req_id %lld pd %d SLOT READY",
Sagar Gored79f95e2017-03-14 18:32:17 -07001223 idx, add_req->req_id, tbl->pd);
1224 slot->state = CRM_REQ_STATE_READY;
1225 }
1226 mutex_unlock(&link->req.lock);
1227
1228end:
1229 return rc;
1230}
1231
1232/**
1233 * cam_req_mgr_process_error()
1234 *
 1235 * @brief: This runs in workqueue thread context. Handles bubble/error recovery.
1236 * @priv : link information.
1237 * @data : contains information about frame_id, link etc.
1238 *
1239 * @return: 0 on success.
1240 */
1241int cam_req_mgr_process_error(void *priv, void *data)
1242{
1243 int rc = 0, idx = -1, i;
1244 struct cam_req_mgr_error_notify *err_info = NULL;
1245 struct cam_req_mgr_core_link *link = NULL;
1246 struct cam_req_mgr_req_queue *in_q = NULL;
1247 struct cam_req_mgr_slot *slot = NULL;
1248 struct cam_req_mgr_connected_device *device = NULL;
1249 struct cam_req_mgr_link_evt_data evt_data;
1250 struct crm_task_payload *task_data = NULL;
1251
1252 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001253 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001254 rc = -EINVAL;
1255 goto end;
1256 }
1257 link = (struct cam_req_mgr_core_link *)priv;
1258 task_data = (struct crm_task_payload *)data;
1259 err_info = (struct cam_req_mgr_error_notify *)&task_data->u;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001260 CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001261 err_info->link_hdl,
1262 err_info->req_id,
1263 err_info->error);
1264
1265 in_q = link->req.in_q;
1266
1267 mutex_lock(&link->req.lock);
1268 if (err_info->error == CRM_KMD_ERR_BUBBLE) {
1269 idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
1270 if (idx < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001271 CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
Sagar Gored79f95e2017-03-14 18:32:17 -07001272 err_info->req_id);
1273 } else {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001274 CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001275 err_info->req_id, idx);
1276 slot = &in_q->slot[idx];
1277 if (!slot->recover) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001278 CAM_WARN(CAM_CRM,
1279 "err recovery disabled req_id %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -07001280 err_info->req_id);
1281 mutex_unlock(&link->req.lock);
1282 return 0;
1283 } else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
1284 && slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001285 CAM_WARN(CAM_CRM,
1286 "req_id %lld can not be recovered %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001287 err_info->req_id, slot->status);
1288 mutex_unlock(&link->req.lock);
1289 return -EINVAL;
1290 }
1291 /* Notify all devices in the link about error */
1292 for (i = 0; i < link->num_devs; i++) {
1293 device = &link->l_dev[i];
1294 if (device != NULL) {
1295 evt_data.dev_hdl = device->dev_hdl;
1296 evt_data.evt_type =
1297 CAM_REQ_MGR_LINK_EVT_ERR;
1298 evt_data.link_hdl = link->link_hdl;
1299 evt_data.req_id = err_info->req_id;
1300 evt_data.u.error = err_info->error;
1301 if (device->ops &&
1302 device->ops->process_evt)
1303 rc = device->ops->
1304 process_evt(&evt_data);
1305 }
1306 }
1307 /* Bring processing pointer to bubbled req id */
1308 __cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
1309 in_q->rd_idx = idx;
1310 in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
1311 mutex_lock(&link->lock);
1312 link->state = CAM_CRM_LINK_STATE_ERR;
1313 mutex_unlock(&link->lock);
1314 }
1315 }
1316 mutex_unlock(&link->req.lock);
1317
1318end:
1319 return rc;
1320}
1321
1322/**
Junzhe Zou2df84502017-05-26 13:20:23 -07001323 * cam_req_mgr_process_trigger()
Sagar Gore8d91a622017-02-23 14:57:18 -08001324 *
1325 * @brief: This runs in workque thread context. Call core funcs to check
Sagar Gored79f95e2017-03-14 18:32:17 -07001326 * which peding requests can be processed.
1327 * @priv : link information.
1328 * @data : contains information about frame_id, link etc.
Sagar Gore8d91a622017-02-23 14:57:18 -08001329 *
Sagar Gored79f95e2017-03-14 18:32:17 -07001330 * @return: 0 on success.
Sagar Gore8d91a622017-02-23 14:57:18 -08001331 */
Junzhe Zou2df84502017-05-26 13:20:23 -07001332static int cam_req_mgr_process_trigger(void *priv, void *data)
Sagar Gore8d91a622017-02-23 14:57:18 -08001333{
Sagar Gored79f95e2017-03-14 18:32:17 -07001334 int rc = 0;
Junzhe Zou2df84502017-05-26 13:20:23 -07001335 struct cam_req_mgr_trigger_notify *trigger_data = NULL;
Sagar Gored79f95e2017-03-14 18:32:17 -07001336 struct cam_req_mgr_core_link *link = NULL;
1337 struct cam_req_mgr_req_queue *in_q = NULL;
1338 struct crm_task_payload *task_data = NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001339
1340 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001341 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001342 rc = -EINVAL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001343 goto end;
1344 }
1345 link = (struct cam_req_mgr_core_link *)priv;
Sagar Gored79f95e2017-03-14 18:32:17 -07001346 task_data = (struct crm_task_payload *)data;
Junzhe Zou2df84502017-05-26 13:20:23 -07001347 trigger_data = (struct cam_req_mgr_trigger_notify *)&task_data->u;
Sagar Gore8d91a622017-02-23 14:57:18 -08001348
Junzhe Zou2df84502017-05-26 13:20:23 -07001349 CAM_DBG(CAM_CRM, "link_hdl %x frame_id %lld, trigger %x\n",
1350 trigger_data->link_hdl,
1351 trigger_data->frame_id,
1352 trigger_data->trigger);
Sagar Gore8d91a622017-02-23 14:57:18 -08001353
Sagar Gored79f95e2017-03-14 18:32:17 -07001354 in_q = link->req.in_q;
1355
1356 mutex_lock(&link->req.lock);
1357 /*
1358 * Check if current read index is in applied state, if yes make it free
1359 * and increment read index to next slot.
Sagar Gore8d91a622017-02-23 14:57:18 -08001360 */
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001361 CAM_DBG(CAM_CRM, "link_hdl %x current idx %d req_status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001362 link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
Sagar Gore8d91a622017-02-23 14:57:18 -08001363
Sagar Gored79f95e2017-03-14 18:32:17 -07001364 if (link->state == CAM_CRM_LINK_STATE_ERR)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001365 CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001366 in_q->rd_idx,
1367 in_q->slot[in_q->rd_idx].status);
Sagar Gore8d91a622017-02-23 14:57:18 -08001368
Sagar Gored79f95e2017-03-14 18:32:17 -07001369 if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
1370 /*
1371 * Do NOT reset req q slot data here, it can not be done
1372 * here because we need to preserve the data to handle bubble.
1373 */
1374 __cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
Sagar Gore8d91a622017-02-23 14:57:18 -08001375 }
Junzhe Zou2df84502017-05-26 13:20:23 -07001376 rc = __cam_req_mgr_process_req(link, trigger_data->trigger);
Sagar Gored79f95e2017-03-14 18:32:17 -07001377 mutex_unlock(&link->req.lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001378
1379end:
Sagar Gored79f95e2017-03-14 18:32:17 -07001380 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001381}
1382
Sagar Gore8d91a622017-02-23 14:57:18 -08001383
Sagar Gored79f95e2017-03-14 18:32:17 -07001384/* Linked devices' Callback section */
1385
1386/**
1387 * cam_req_mgr_cb_add_req()
1388 *
 1389 * @brief : Drivers call this function to notify that a new packet is available.
1390 * @add_req : Information about new request available at a device.
1391 *
1392 * @return : 0 on success, negative in case of failure
1393 *
1394 */
1395static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
1396{
1397 int rc = 0, idx;
1398 struct crm_workq_task *task = NULL;
1399 struct cam_req_mgr_core_link *link = NULL;
1400 struct cam_req_mgr_add_request *dev_req;
1401 struct crm_task_payload *task_data;
1402
1403 if (!add_req) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001404 CAM_ERR(CAM_CRM, "add_req is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001405 rc = -EINVAL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001406 goto end;
1407 }
1408
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001409 CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
1410 add_req->dev_hdl, add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001411 link = (struct cam_req_mgr_core_link *)
1412 cam_get_device_priv(add_req->link_hdl);
1413
1414 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001415 CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001416 rc = -EINVAL;
1417 goto end;
1418 }
1419
1420 /* Validate if req id is present in input queue */
1421 idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
1422 if (idx < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001423 CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001424 rc = -ENOENT;
1425 goto end;
1426 }
1427
1428 task = cam_req_mgr_workq_get_task(link->workq);
1429 if (!task) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001430 CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -07001431 add_req->dev_hdl, add_req->req_id);
1432 rc = -EBUSY;
1433 goto end;
1434 }
1435
1436 task_data = (struct crm_task_payload *)task->payload;
1437 task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
1438 dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
1439 dev_req->req_id = add_req->req_id;
1440 dev_req->link_hdl = add_req->link_hdl;
1441 dev_req->dev_hdl = add_req->dev_hdl;
1442 task->process_cb = &cam_req_mgr_process_add_req;
1443 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001444 CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
1445 add_req->dev_hdl, add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001446
1447end:
1448 return rc;
1449}
1450
1451/**
1452 * cam_req_mgr_cb_notify_err()
1453 *
1454 * @brief : Error received from device, sends bubble recovery
1455 * @err_info : contains information about error occurred like bubble/overflow
1456 *
1457 * @return : 0 on success, negative in case of failure
1458 *
1459 */
1460static int cam_req_mgr_cb_notify_err(
1461 struct cam_req_mgr_error_notify *err_info)
1462{
1463 int rc = 0;
1464 struct crm_workq_task *task = NULL;
1465 struct cam_req_mgr_core_link *link = NULL;
1466 struct cam_req_mgr_error_notify *notify_err;
1467 struct crm_task_payload *task_data;
1468
1469 if (!err_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001470 CAM_ERR(CAM_CRM, "err_info is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001471 rc = -EINVAL;
1472 goto end;
1473 }
1474
1475 link = (struct cam_req_mgr_core_link *)
1476 cam_get_device_priv(err_info->link_hdl);
1477 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001478 CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001479 rc = -EINVAL;
1480 goto end;
1481 }
1482
1483 crm_timer_reset(link->watchdog);
1484 task = cam_req_mgr_workq_get_task(link->workq);
1485 if (!task) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001486 CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001487 rc = -EBUSY;
1488 goto end;
1489 }
1490
1491 task_data = (struct crm_task_payload *)task->payload;
1492 task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
1493 notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
1494 notify_err->req_id = err_info->req_id;
1495 notify_err->link_hdl = err_info->link_hdl;
1496 notify_err->dev_hdl = err_info->dev_hdl;
1497 notify_err->error = err_info->error;
1498 task->process_cb = &cam_req_mgr_process_error;
1499 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
1500
1501end:
1502 return rc;
1503}
1504
1505/**
Junzhe Zou2df84502017-05-26 13:20:23 -07001506 * cam_req_mgr_cb_notify_trigger()
Sagar Gored79f95e2017-03-14 18:32:17 -07001507 *
1508 * @brief : SOF received from device, sends trigger through workqueue
1509 * @sof_data: contains information about frame_id, link etc.
1510 *
1511 * @return : 0 on success
1512 *
1513 */
Junzhe Zou2df84502017-05-26 13:20:23 -07001514static int cam_req_mgr_cb_notify_trigger(
1515 struct cam_req_mgr_trigger_notify *trigger_data)
Sagar Gored79f95e2017-03-14 18:32:17 -07001516{
1517 int rc = 0;
1518 struct crm_workq_task *task = NULL;
1519 struct cam_req_mgr_core_link *link = NULL;
Junzhe Zou2df84502017-05-26 13:20:23 -07001520 struct cam_req_mgr_trigger_notify *notify_trigger;
Sagar Gored79f95e2017-03-14 18:32:17 -07001521 struct crm_task_payload *task_data;
1522
Junzhe Zou2df84502017-05-26 13:20:23 -07001523 if (!trigger_data) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001524 CAM_ERR(CAM_CRM, "trigger_data is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001525 rc = -EINVAL;
1526 goto end;
1527 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001528
1529 link = (struct cam_req_mgr_core_link *)
Junzhe Zou2df84502017-05-26 13:20:23 -07001530 cam_get_device_priv(trigger_data->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001531 if (!link) {
Junzhe Zou2df84502017-05-26 13:20:23 -07001532 CAM_DBG(CAM_CRM, "link ptr NULL %x", trigger_data->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001533 rc = -EINVAL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001534 goto end;
Sagar Gore8d91a622017-02-23 14:57:18 -08001535 }
1536
Sagar Gored79f95e2017-03-14 18:32:17 -07001537 crm_timer_reset(link->watchdog);
Sagar Gore8d91a622017-02-23 14:57:18 -08001538 task = cam_req_mgr_workq_get_task(link->workq);
1539 if (!task) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001540 CAM_ERR(CAM_CRM, "no empty task frame %lld",
Junzhe Zou2df84502017-05-26 13:20:23 -07001541 trigger_data->frame_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001542 rc = -EBUSY;
Sagar Gore8d91a622017-02-23 14:57:18 -08001543 goto end;
1544 }
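	/*
	 * Package the trigger as a NOTIFY_SOF task and defer the real work
	 * to cam_req_mgr_process_trigger() on the link workq.
	 */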
Sagar Gored79f95e2017-03-14 18:32:17 -07001545 task_data = (struct crm_task_payload *)task->payload;
1546 task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
Junzhe Zou2df84502017-05-26 13:20:23 -07001547 notify_trigger = (struct cam_req_mgr_trigger_notify *)&task_data->u;
1548 notify_trigger->frame_id = trigger_data->frame_id;
1549 notify_trigger->link_hdl = trigger_data->link_hdl;
1550 notify_trigger->dev_hdl = trigger_data->dev_hdl;
1551 notify_trigger->trigger = trigger_data->trigger;
1552 task->process_cb = &cam_req_mgr_process_trigger;
Sagar Gored79f95e2017-03-14 18:32:17 -07001553 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
Sagar Gore8d91a622017-02-23 14:57:18 -08001554
1555end:
Sagar Gored79f95e2017-03-14 18:32:17 -07001556 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001557}
1558
Sagar Gored79f95e2017-03-14 18:32:17 -07001559static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
Junzhe Zou2df84502017-05-26 13:20:23 -07001560 .notify_trigger = cam_req_mgr_cb_notify_trigger,
1561 .notify_err = cam_req_mgr_cb_notify_err,
1562 .add_req = cam_req_mgr_cb_add_req,
Sagar Gored79f95e2017-03-14 18:32:17 -07001563};
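/*
 * These callbacks are handed to every connected device through link_setup()
 * (see __cam_req_mgr_setup_link_info() below). A device driver would invoke
 * them roughly as follows; this is an illustrative sketch only, not taken
 * from any specific driver, and the trigger point macro is assumed from
 * cam_req_mgr_interface.h:
 *
 *	struct cam_req_mgr_trigger_notify notify = {
 *		.link_hdl = link_hdl,	// received in link_setup()
 *		.dev_hdl  = dev_hdl,
 *		.frame_id = frame_id,
 *		.trigger  = CAM_TRIGGER_POINT_SOF,
 *	};
 *	crm_cb->notify_trigger(&notify);
 */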
1564
Sagar Gore8d91a622017-02-23 14:57:18 -08001565/**
Sagar Gored79f95e2017-03-14 18:32:17 -07001566 * __cam_req_mgr_setup_link_info()
Sagar Gore8d91a622017-02-23 14:57:18 -08001567 *
Sagar Gored79f95e2017-03-14 18:32:17 -07001568 * @brief : Sets up input queue, create pd based tables, communicate with
1569 * devs connected on this link and setup communication.
1570 * @link : pointer to link to setup
1571 * @link_info : link_info coming from CSL to prepare link
Sagar Gore8d91a622017-02-23 14:57:18 -08001572 *
Sagar Gored79f95e2017-03-14 18:32:17 -07001573 * @return : 0 on success, negative in case of failure
1574 *
Sagar Gore8d91a622017-02-23 14:57:18 -08001575 */
Sagar Gored79f95e2017-03-14 18:32:17 -07001576static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
1577 struct cam_req_mgr_link_info *link_info)
Sagar Gore8d91a622017-02-23 14:57:18 -08001578{
Sagar Gored79f95e2017-03-14 18:32:17 -07001579 int rc = 0, i = 0;
1580 struct cam_req_mgr_core_dev_link_setup link_data;
1581 struct cam_req_mgr_connected_device *dev;
1582 struct cam_req_mgr_req_tbl *pd_tbl;
1583 enum cam_pipeline_delay max_delay;
Junzhe Zou2df84502017-05-26 13:20:23 -07001584 uint32_t subscribe_event = 0;
Sagar Gore8d91a622017-02-23 14:57:18 -08001585
Sagar Gored79f95e2017-03-14 18:32:17 -07001586 if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
1587 return -EPERM;
Sagar Gore8d91a622017-02-23 14:57:18 -08001588
Sagar Gored79f95e2017-03-14 18:32:17 -07001589 mutex_init(&link->req.lock);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001590 CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001591 link->req.num_tbl = 0;
1592
1593 rc = __cam_req_mgr_setup_in_q(&link->req);
1594 if (rc < 0)
1595 return rc;
1596
1597 mutex_lock(&link->lock);
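	/*
	 * First pass: query every connected device for its capabilities
	 * (pipeline delay, trigger type) and track the maximum delay seen.
	 */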
1598 max_delay = CAM_PIPELINE_DELAY_0;
1599 for (i = 0; i < link_info->num_devices; i++) {
1600 dev = &link->l_dev[i];
1601 /* Using dev hdl, get ops ptr to communicate with device */
1602 dev->ops = (struct cam_req_mgr_kmd_ops *)
1603 cam_get_device_ops(link_info->dev_hdls[i]);
1604 if (!dev->ops ||
1605 !dev->ops->get_dev_info ||
1606 !dev->ops->link_setup) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001607 CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001608 rc = -ENXIO;
1609 goto error;
Sagar Gore8d91a622017-02-23 14:57:18 -08001610 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001611 dev->dev_hdl = link_info->dev_hdls[i];
1612 dev->parent = (void *)link;
1613 dev->dev_info.dev_hdl = dev->dev_hdl;
1614 rc = dev->ops->get_dev_info(&dev->dev_info);
Gregory Bergschneider60679932017-07-19 15:27:16 -06001615
1616 trace_cam_req_mgr_connect_device(link, &dev->dev_info);
1617
Junzhe Zou2df84502017-05-26 13:20:23 -07001618 CAM_DBG(CAM_CRM,
1619 "%x: connected: %s, id %d, delay %d, trigger %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07001620 link_info->session_hdl, dev->dev_info.name,
Junzhe Zou2df84502017-05-26 13:20:23 -07001621 dev->dev_info.dev_id, dev->dev_info.p_delay,
1622 dev->dev_info.trigger);
Sagar Gored79f95e2017-03-14 18:32:17 -07001623 if (rc < 0 ||
1624 dev->dev_info.p_delay >=
1625 CAM_PIPELINE_DELAY_MAX ||
1626 dev->dev_info.p_delay <
1627 CAM_PIPELINE_DELAY_0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001628 CAM_ERR(CAM_CRM, "get device info failed");
Sagar Gored79f95e2017-03-14 18:32:17 -07001629 goto error;
1630 } else {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001631 CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001632 link_info->session_hdl,
1633 dev->dev_info.name,
1634 dev->dev_info.p_delay);
Junzhe Zou2df84502017-05-26 13:20:23 -07001635 if (dev->dev_info.p_delay > max_delay)
1636 max_delay = dev->dev_info.p_delay;
1637
1638 subscribe_event |= (uint32_t)dev->dev_info.trigger;
Sagar Gored79f95e2017-03-14 18:32:17 -07001639 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001640 }
1641
Junzhe Zou2df84502017-05-26 13:20:23 -07001642 link->subscribe_event = subscribe_event;
Sagar Gored79f95e2017-03-14 18:32:17 -07001643 link_data.link_enable = 1;
1644 link_data.link_hdl = link->link_hdl;
1645 link_data.crm_cb = &cam_req_mgr_ops;
1646 link_data.max_delay = max_delay;
Junzhe Zou2df84502017-05-26 13:20:23 -07001647 link_data.subscribe_event = subscribe_event;
Sagar Gored79f95e2017-03-14 18:32:17 -07001648
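	/*
	 * Second pass: attach each device to a per-pipeline-delay request
	 * table and send link_setup() with the CRM callbacks to the device.
	 */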
1649 for (i = 0; i < link_info->num_devices; i++) {
1650 dev = &link->l_dev[i];
1651
1652 link_data.dev_hdl = dev->dev_hdl;
1653 /*
1654 * Create one request tracking table per unique
1655 * pipeline delay; devices sharing a delay reuse it
1656 */
1657 if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
1658 pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
1659 dev->dev_info.p_delay);
1660 if (!pd_tbl) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001661 CAM_ERR(CAM_CRM, "pd %d tbl not found",
Sagar Gored79f95e2017-03-14 18:32:17 -07001662 dev->dev_info.p_delay);
1663 rc = -ENXIO;
1664 goto error;
1665 }
1666 } else {
1667 pd_tbl = __cam_req_mgr_create_pd_tbl(
1668 dev->dev_info.p_delay);
1669 if (pd_tbl == NULL) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001670 CAM_ERR(CAM_CRM, "create new pd tbl failed");
Sagar Gored79f95e2017-03-14 18:32:17 -07001671 rc = -ENXIO;
1672 goto error;
1673 }
1674 pd_tbl->pd = dev->dev_info.p_delay;
1675 link->pd_mask |= (1 << pd_tbl->pd);
1676 /*
1677 * Add the table to the list, keeping the list
1678 * sorted from max pd down to the lowest
1679 */
1680 __cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
1681 }
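		/* Give the device a unique bit in its pd table's device mask */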
1682 dev->dev_bit = pd_tbl->dev_count++;
1683 dev->pd_tbl = pd_tbl;
1684 pd_tbl->dev_mask |= (1 << dev->dev_bit);
1685
1686 /* Communicate with dev to establish the link */
1687 dev->ops->link_setup(&link_data);
1688
1689 if (link->max_delay < dev->dev_info.p_delay)
1690 link->max_delay = dev->dev_info.p_delay;
1691 }
1692 link->num_devs = link_info->num_devices;
1693
1694 /* Assign id for pd tables */
1695 __cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
1696
1697 /* At start, expect max pd devices, all are in skip state */
1698 __cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
1699
1700 mutex_unlock(&link->lock);
1701 return 0;
1702
1703error:
1704 __cam_req_mgr_destroy_link_info(link);
1705 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001706}
1707
Sagar Gored79f95e2017-03-14 18:32:17 -07001708/* IOCTLs handling section */
Sagar Gore8d91a622017-02-23 14:57:18 -08001709int cam_req_mgr_create_session(
1710 struct cam_req_mgr_session_info *ses_info)
1711{
Sagar Gored79f95e2017-03-14 18:32:17 -07001712 int rc = 0;
1713 int32_t session_hdl;
1714 struct cam_req_mgr_core_session *cam_session = NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001715
1716 if (!ses_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001717 CAM_DBG(CAM_CRM, "NULL session info pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08001718 return -EINVAL;
1719 }
1720 mutex_lock(&g_crm_core_dev->crm_lock);
1721 cam_session = (struct cam_req_mgr_core_session *)
1722 kzalloc(sizeof(*cam_session), GFP_KERNEL);
1723 if (!cam_session) {
Sagar Gored79f95e2017-03-14 18:32:17 -07001724 rc = -ENOMEM;
Sagar Gore8d91a622017-02-23 14:57:18 -08001725 goto end;
1726 }
1727
1728 session_hdl = cam_create_session_hdl((void *)cam_session);
1729 if (session_hdl < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001730 CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
1731 session_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001732 rc = session_hdl;
1733 kfree(cam_session);
1734 goto end;
Sagar Gore8d91a622017-02-23 14:57:18 -08001735 }
1736 ses_info->session_hdl = session_hdl;
Sagar Gored79f95e2017-03-14 18:32:17 -07001737
1738 mutex_init(&cam_session->lock);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001739 CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001740
1741 mutex_lock(&cam_session->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001742 cam_session->session_hdl = session_hdl;
Sagar Gored79f95e2017-03-14 18:32:17 -07001743 cam_session->num_links = 0;
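	/* Track the new session on the global core device's session list */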
Sagar Gore8d91a622017-02-23 14:57:18 -08001744 list_add(&cam_session->entry, &g_crm_core_dev->session_head);
Sagar Gored79f95e2017-03-14 18:32:17 -07001745 mutex_unlock(&cam_session->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001746end:
1747 mutex_unlock(&g_crm_core_dev->crm_lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001748 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001749}
1750
1751int cam_req_mgr_destroy_session(
1752 struct cam_req_mgr_session_info *ses_info)
1753{
Sagar Gored79f95e2017-03-14 18:32:17 -07001754 int rc;
1755 struct cam_req_mgr_core_session *cam_session = NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001756
1757 if (!ses_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001758 CAM_DBG(CAM_CRM, "NULL session info pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08001759 return -EINVAL;
1760 }
1761
1762 mutex_lock(&g_crm_core_dev->crm_lock);
1763 cam_session = (struct cam_req_mgr_core_session *)
1764 cam_get_device_priv(ses_info->session_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001765 if (!cam_session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001766 CAM_ERR(CAM_CRM, "failed to get session priv");
Sagar Gored79f95e2017-03-14 18:32:17 -07001767 rc = -ENOENT;
Sagar Gore8d91a622017-02-23 14:57:18 -08001768 goto end;
1769
1770 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001771 mutex_lock(&cam_session->lock);
1772 if (cam_session->num_links) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001773 CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
Sagar Gore8d91a622017-02-23 14:57:18 -08001774 ses_info->session_hdl,
Sagar Gored79f95e2017-03-14 18:32:17 -07001775 cam_session->num_links);
1776 /* @TODO : Go through active links and destroy ? */
Sagar Gore8d91a622017-02-23 14:57:18 -08001777 }
1778 list_del(&cam_session->entry);
Sagar Gored79f95e2017-03-14 18:32:17 -07001779 mutex_unlock(&cam_session->lock);
1780 mutex_destroy(&cam_session->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001781 kfree(cam_session);
1782
Sagar Gored79f95e2017-03-14 18:32:17 -07001783 rc = cam_destroy_session_hdl(ses_info->session_hdl);
1784 if (rc < 0)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001785 CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001786 ses_info->session_hdl, rc);
Sagar Gore8d91a622017-02-23 14:57:18 -08001787
1788end:
1789 mutex_unlock(&g_crm_core_dev->crm_lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001790 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001791}
1792
1793int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
1794{
Sagar Gored79f95e2017-03-14 18:32:17 -07001795 int rc = 0;
1796 char buf[128];
1797 struct cam_create_dev_hdl root_dev;
1798 struct cam_req_mgr_core_session *cam_session;
1799 struct cam_req_mgr_core_link *link;
Sagar Gore8d91a622017-02-23 14:57:18 -08001800
1801 if (!link_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001802 CAM_DBG(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08001803 return -EINVAL;
1804 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001805 if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001806 CAM_ERR(CAM_CRM, "Invalid num devices %d",
1807 link_info->num_devices);
Sagar Gore8d91a622017-02-23 14:57:18 -08001808 return -EINVAL;
1809 }
1810
Sagar Gored79f95e2017-03-14 18:32:17 -07001811 /* session hdl's priv data is cam session struct */
Sagar Gore8d91a622017-02-23 14:57:18 -08001812 cam_session = (struct cam_req_mgr_core_session *)
1813 cam_get_device_priv(link_info->session_hdl);
1814 if (!cam_session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001815 CAM_DBG(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08001816 return -EINVAL;
1817 }
1818
Sagar Gored79f95e2017-03-14 18:32:17 -07001819 mutex_lock(&g_crm_core_dev->crm_lock);
1820
1821 /* Allocate link struct and map it with session's request queue */
1822 link = __cam_req_mgr_reserve_link(cam_session);
Sagar Gore8d91a622017-02-23 14:57:18 -08001823 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001824 CAM_ERR(CAM_CRM, "failed to reserve new link");
Sagar Gored79f95e2017-03-14 18:32:17 -07001825 mutex_unlock(&g_crm_core_dev->crm_lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001826 return -EINVAL;
1827 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001828 CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001829
1830 memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
1831 root_dev.session_hdl = link_info->session_hdl;
1832 root_dev.priv = (void *)link;
1833
Sagar Gored79f95e2017-03-14 18:32:17 -07001834 mutex_lock(&link->lock);
1835 /* Create unique dev handle for link */
1836 link->link_hdl = cam_create_device_hdl(&root_dev);
1837 if (link->link_hdl < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001838 CAM_ERR(CAM_CRM,
1839 "Insufficient memory to create new device handle");
Sagar Gored79f95e2017-03-14 18:32:17 -07001840 mutex_unlock(&link->lock);
1841 rc = link->link_hdl;
Sagar Gore8d91a622017-02-23 14:57:18 -08001842 goto link_hdl_fail;
1843 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001844 mutex_unlock(&link->lock);
1845 link_info->link_hdl = link->link_hdl;
Sagar Gore8d91a622017-02-23 14:57:18 -08001846
Sagar Gored79f95e2017-03-14 18:32:17 -07001847 /* Allocate memory to hold data of all linked devs */
1848 rc = __cam_req_mgr_create_subdevs(&link->l_dev,
1849 link_info->num_devices);
1850 if (rc < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001851 CAM_ERR(CAM_CRM,
1852 "Insufficient memory to create new crm subdevs");
Sagar Gore8d91a622017-02-23 14:57:18 -08001853 goto create_subdev_failed;
1854 }
1855
Sagar Gored79f95e2017-03-14 18:32:17 -07001856 /* Using device ops query connected devs, prepare request tables */
1857 rc = __cam_req_mgr_setup_link_info(link, link_info);
1858 if (rc < 0)
1859 goto setup_failed;
Sagar Gore8d91a622017-02-23 14:57:18 -08001860
Sagar Gored79f95e2017-03-14 18:32:17 -07001861 mutex_lock(&link->lock);
1862 link->state = CAM_CRM_LINK_STATE_READY;
1863 mutex_unlock(&link->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001864
1865 /* Create worker for current link */
Sagar Gored79f95e2017-03-14 18:32:17 -07001866 snprintf(buf, sizeof(buf), "%x-%x",
1867 link_info->session_hdl, link->link_hdl);
Sagar Gore9f404712017-05-22 16:57:25 -07001868 rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
1869 &link->workq, CRM_WORKQ_USAGE_NON_IRQ);
Sagar Gored79f95e2017-03-14 18:32:17 -07001870 if (rc < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001871 CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
Sagar Gored79f95e2017-03-14 18:32:17 -07001872 __cam_req_mgr_destroy_link_info(link);
1873 goto setup_failed;
Sagar Gore8d91a622017-02-23 14:57:18 -08001874 }
1875
Sagar Gored79f95e2017-03-14 18:32:17 -07001876 /* Assign payload to workqueue tasks */
1877 rc = __cam_req_mgr_setup_payload(link->workq);
1878 if (rc < 0) {
1879 __cam_req_mgr_destroy_link_info(link);
1880 cam_req_mgr_workq_destroy(&link->workq);
1881 goto setup_failed;
1882 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001883
Sagar Gored79f95e2017-03-14 18:32:17 -07001884	/* Start watchdog timer to detect if camera hw goes into a bad state */
1885 rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
1886 link, &__cam_req_mgr_sof_freeze);
1887 if (rc < 0) {
1888 kfree(link->workq->task.pool[0].payload);
1889 __cam_req_mgr_destroy_link_info(link);
1890 cam_req_mgr_workq_destroy(&link->workq);
1891 goto setup_failed;
1892 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001893
Sagar Gored79f95e2017-03-14 18:32:17 -07001894 mutex_unlock(&g_crm_core_dev->crm_lock);
1895 return rc;
1896setup_failed:
1897 __cam_req_mgr_destroy_subdev(link->l_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08001898create_subdev_failed:
Sagar Gored79f95e2017-03-14 18:32:17 -07001899 cam_destroy_device_hdl(link->link_hdl);
1900 link_info->link_hdl = 0;
Sagar Gore8d91a622017-02-23 14:57:18 -08001901link_hdl_fail:
Sagar Gored79f95e2017-03-14 18:32:17 -07001902 mutex_lock(&link->lock);
1903 link->state = CAM_CRM_LINK_STATE_AVAILABLE;
1904 mutex_unlock(&link->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001905
Sagar Gored79f95e2017-03-14 18:32:17 -07001906 mutex_unlock(&g_crm_core_dev->crm_lock);
1907 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001908}
1909
1910int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
1911{
Sagar Gored79f95e2017-03-14 18:32:17 -07001912 int rc = 0;
Sagar Gore8d91a622017-02-23 14:57:18 -08001913 struct cam_req_mgr_core_session *cam_session;
Sagar Gored79f95e2017-03-14 18:32:17 -07001914 struct cam_req_mgr_core_link *link;
Sagar Gore8d91a622017-02-23 14:57:18 -08001915
1916 if (!unlink_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001917 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08001918 return -EINVAL;
1919 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001920
1921 mutex_lock(&g_crm_core_dev->crm_lock);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001922 CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001923
1924 /* session hdl's priv data is cam session struct */
Sagar Gore8d91a622017-02-23 14:57:18 -08001925 cam_session = (struct cam_req_mgr_core_session *)
Sagar Gored79f95e2017-03-14 18:32:17 -07001926 cam_get_device_priv(unlink_info->session_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001927 if (!cam_session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001928 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gored79f95e2017-03-14 18:32:17 -07001929 mutex_unlock(&g_crm_core_dev->crm_lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001930 return -EINVAL;
1931 }
1932
Sagar Gored79f95e2017-03-14 18:32:17 -07001933 /* link hdl's priv data is core_link struct */
1934 link = cam_get_device_priv(unlink_info->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001935 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001936 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gored79f95e2017-03-14 18:32:17 -07001937 mutex_unlock(&g_crm_core_dev->crm_lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001938 return -EINVAL;
1939 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001940 __cam_req_mgr_print_req_tbl(&link->req);
Sagar Gore8d91a622017-02-23 14:57:18 -08001941
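	/*
	 * Tear down in roughly the reverse order of cam_req_mgr_link():
	 * workq payload, watchdog timer, workq, request tables, subdev
	 * array and finally the link handle itself.
	 */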
Sagar Gored79f95e2017-03-14 18:32:17 -07001942 /* Destroy workq payload data */
1943 kfree(link->workq->task.pool[0].payload);
1944 link->workq->task.pool[0].payload = NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001945
Sagar Gored79f95e2017-03-14 18:32:17 -07001946 /* Destroy workq and timer of link */
1947 crm_timer_exit(&link->watchdog);
Sagar Gore8d91a622017-02-23 14:57:18 -08001948
Sagar Gored79f95e2017-03-14 18:32:17 -07001949 cam_req_mgr_workq_destroy(&link->workq);
1950
1951	/* Clean up request tables */
1952 __cam_req_mgr_destroy_link_info(link);
1953
1954 /* Free memory holding data of linked devs */
1955 __cam_req_mgr_destroy_subdev(link->l_dev);
1956
1957 /* Destroy the link handle */
1958 rc = cam_destroy_device_hdl(unlink_info->link_hdl);
1959 if (rc < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001960 CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07001961 rc, link->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001962 }
1963
Sagar Gored79f95e2017-03-14 18:32:17 -07001964	/* Free current link and put it back into the session's free pool of links */
1965 __cam_req_mgr_unreserve_link(cam_session, &link);
1966 mutex_unlock(&g_crm_core_dev->crm_lock);
1967
1968 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001969}
1970
1971int cam_req_mgr_schedule_request(
1972 struct cam_req_mgr_sched_request *sched_req)
1973{
Sagar Gored79f95e2017-03-14 18:32:17 -07001974 int rc = 0;
Sagar Gored79f95e2017-03-14 18:32:17 -07001975 struct cam_req_mgr_core_link *link = NULL;
1976 struct cam_req_mgr_core_session *session = NULL;
1977 struct cam_req_mgr_sched_request *sched;
Sagar Goreb56c81e2017-05-08 17:15:47 -07001978 struct crm_task_payload task_data;
Sagar Gored79f95e2017-03-14 18:32:17 -07001979
Sagar Gore8d91a622017-02-23 14:57:18 -08001980 if (!sched_req) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001981 CAM_ERR(CAM_CRM, "csl_req is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001982 rc = -EINVAL;
1983 goto end;
1984 }
1985
1986 link = (struct cam_req_mgr_core_link *)
1987 cam_get_device_priv(sched_req->link_hdl);
1988 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001989 CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001990 return -EINVAL;
1991 }
1992 session = (struct cam_req_mgr_core_session *)link->parent;
1993 if (!session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001994 CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001995 return -EINVAL;
1996 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001997 CAM_DBG(CAM_CRM, "link %x req %lld",
1998 sched_req->link_hdl, sched_req->req_id);
Sagar Gore8d91a622017-02-23 14:57:18 -08001999
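	/*
	 * Scheduling is handled synchronously: the payload lives on the
	 * stack and is passed straight to cam_req_mgr_process_sched_req()
	 * rather than being queued on the link workq.
	 */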
Sagar Goreb56c81e2017-05-08 17:15:47 -07002000 task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
2001 sched = (struct cam_req_mgr_sched_request *)&task_data.u;
Sagar Gored79f95e2017-03-14 18:32:17 -07002002 sched->req_id = sched_req->req_id;
2003 sched->link_hdl = sched_req->link_hdl;
2004 if (session->force_err_recovery == AUTO_RECOVERY) {
2005 sched->bubble_enable = sched_req->bubble_enable;
2006 } else {
2007 sched->bubble_enable =
2008 (session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
2009 }
Sagar Gored79f95e2017-03-14 18:32:17 -07002010
Sagar Goreb56c81e2017-05-08 17:15:47 -07002011 rc = cam_req_mgr_process_sched_req(link, &task_data);
2012
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002013 CAM_DBG(CAM_CRM, "DONE dev %x req %lld",
2014 sched_req->link_hdl, sched_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07002015end:
2016 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08002017}
2018
Sagar Gored79f95e2017-03-14 18:32:17 -07002019int cam_req_mgr_sync_link(
2020 struct cam_req_mgr_sync_mode *sync_links)
Sagar Gore8d91a622017-02-23 14:57:18 -08002021{
2022 if (!sync_links) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002023 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08002024 return -EINVAL;
2025 }
2026
2027 /* This function handles ioctl, implementation pending */
2028 return 0;
2029}
2030
2031int cam_req_mgr_flush_requests(
Sagar Gored79f95e2017-03-14 18:32:17 -07002032 struct cam_req_mgr_flush_info *flush_info)
Sagar Gore8d91a622017-02-23 14:57:18 -08002033{
Sagar Gored79f95e2017-03-14 18:32:17 -07002034 int rc = 0;
2035 struct crm_workq_task *task = NULL;
2036 struct cam_req_mgr_core_link *link = NULL;
2037 struct cam_req_mgr_flush_info *flush;
2038 struct crm_task_payload *task_data;
2039 struct cam_req_mgr_core_session *session = NULL;
2040
Sagar Gore8d91a622017-02-23 14:57:18 -08002041 if (!flush_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002042 CAM_ERR(CAM_CRM, "flush req is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07002043 rc = -EFAULT;
2044 goto end;
2045 }
2046 if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002047 CAM_ERR(CAM_CRM, "incorrect flush type %x",
2048 flush_info->flush_type);
Sagar Gored79f95e2017-03-14 18:32:17 -07002049 rc = -EINVAL;
2050 goto end;
Sagar Gore8d91a622017-02-23 14:57:18 -08002051 }
2052
Sagar Gored79f95e2017-03-14 18:32:17 -07002053 /* session hdl's priv data is cam session struct */
2054 session = (struct cam_req_mgr_core_session *)
2055 cam_get_device_priv(flush_info->session_hdl);
2056 if (!session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002057 CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07002058 rc = -EINVAL;
2059 goto end;
2060 }
2061 if (session->num_links <= 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002062 CAM_WARN(CAM_CRM, "No active links in session %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07002063 flush_info->session_hdl);
2064 goto end;
2065 }
2066
2067 link = (struct cam_req_mgr_core_link *)
2068 cam_get_device_priv(flush_info->link_hdl);
2069 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002070 CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07002071 rc = -EINVAL;
2072 goto end;
2073 }
2074
2075 task = cam_req_mgr_workq_get_task(link->workq);
2076 if (!task) {
2077 rc = -ENOMEM;
2078 goto end;
2079 }
2080
2081 task_data = (struct crm_task_payload *)task->payload;
2082 task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
2083 flush = (struct cam_req_mgr_flush_info *)&task_data->u;
2084 flush->req_id = flush_info->req_id;
2085 flush->link_hdl = flush_info->link_hdl;
2086 flush->flush_type = flush_info->flush_type;
2087 task->process_cb = &cam_req_mgr_process_flush_req;
2088 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
2089
2090 /* Blocking call */
2091 init_completion(&link->workq_comp);
2092 rc = wait_for_completion_timeout(
2093 &link->workq_comp,
2094 msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
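	/*
	 * Note: wait_for_completion_timeout() returns the remaining jiffies
	 * on completion and 0 on timeout, so rc here is not a negative errno.
	 */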
2095end:
2096 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08002097}
2098
2099
2100int cam_req_mgr_core_device_init(void)
2101{
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002102 CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002103
2104 if (g_crm_core_dev) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002105 CAM_WARN(CAM_CRM, "core device is already initialized");
Sagar Gore8d91a622017-02-23 14:57:18 -08002106 return 0;
2107 }
2108 g_crm_core_dev = (struct cam_req_mgr_core_device *)
2109 kzalloc(sizeof(*g_crm_core_dev), GFP_KERNEL);
2110 if (!g_crm_core_dev)
2111 return -ENOMEM;
2112
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002113 CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002114 INIT_LIST_HEAD(&g_crm_core_dev->session_head);
2115 mutex_init(&g_crm_core_dev->crm_lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07002116 cam_req_mgr_debug_register(g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002117
2118 return 0;
2119}
2120
2121int cam_req_mgr_core_device_deinit(void)
2122{
2123 if (!g_crm_core_dev) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002124 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08002125 return -EINVAL;
2126 }
2127
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002128 CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002129 mutex_destroy(&g_crm_core_dev->crm_lock);
2130 kfree(g_crm_core_dev);
2131 g_crm_core_dev = NULL;
2132
2133 return 0;
2134}