blob: 96d5b6e991d0bf169f86b3e40866d3bfee8b1221 [file] [log] [blame]
Sagar Gore8d91a622017-02-23 14:57:18 -08001/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/of_platform.h>
15#include <linux/slab.h>
16#include <linux/mutex.h>
17#include "cam_req_mgr_interface.h"
18#include "cam_req_mgr_util.h"
19#include "cam_req_mgr_core.h"
20#include "cam_req_mgr_workq.h"
Sagar Gored79f95e2017-03-14 18:32:17 -070021#include "cam_req_mgr_debug.h"
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -060022#include "cam_trace.h"
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070023#include "cam_debug_util.h"
Sagar Gore8d91a622017-02-23 14:57:18 -080024
25static struct cam_req_mgr_core_device *g_crm_core_dev;
26
Sagar Gored79f95e2017-03-14 18:32:17 -070027static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
28{
29 int32_t i = 0;
30 int rc = 0;
31 struct crm_task_payload *task_data = NULL;
32
33 task_data = kcalloc(
34 workq->task.num_task, sizeof(*task_data),
35 GFP_KERNEL);
36 if (!task_data) {
37 rc = -ENOMEM;
38 } else {
39 for (i = 0; i < workq->task.num_task; i++)
40 workq->task.pool[i].payload = &task_data[i];
41 }
42
43 return rc;
44}
Sagar Gore8d91a622017-02-23 14:57:18 -080045
/**
 * __cam_req_mgr_print_req_tbl()
 *
 * @brief : Debug helper - dump the input queue slots and every pipeline
 *          delay based request table linked to this request data
 * @req   : request data whose in_q and l_tbl are printed
 *
 * @return: 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_print_req_tbl(struct cam_req_mgr_req_data *req)
{
	int rc = 0;
	int32_t i = 0;
	struct cam_req_mgr_req_queue *in_q = req->in_q;
	struct cam_req_mgr_req_tbl *req_tbl = req->l_tbl;

	if (!in_q || !req_tbl) {
		CAM_WARN(CAM_CRM, "NULL pointer %pK %pK", in_q, req_tbl);
		return -EINVAL;
	}
	CAM_DBG(CAM_CRM, "in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
	mutex_lock(&req->lock);
	/* Dump every input queue slot with its request id */
	for (i = 0; i < in_q->num_slots; i++) {
		CAM_DBG(CAM_CRM, "IN_Q %d: idx %d, red_id %lld", i,
			in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
	}

	/* Walk the list of pd tables (sorted by decreasing pd) */
	while (req_tbl != NULL) {
		for (i = 0; i < req_tbl->num_slots; i++) {
			CAM_DBG(CAM_CRM, "idx= %d, map= %x, state= %d",
				req_tbl->slot[i].idx,
				req_tbl->slot[i].req_ready_map,
				req_tbl->slot[i].state);
		}
		CAM_DBG(CAM_CRM,
			"TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
			req_tbl->id, req_tbl->pd, req_tbl->dev_count,
			req_tbl->dev_mask, req_tbl->skip_traverse,
			req_tbl->num_slots);
		req_tbl = req_tbl->next;
	}
	mutex_unlock(&req->lock);

	return rc;
}
91
92/**
93 * __cam_req_mgr_find_pd_tbl()
94 *
95 * @brief : Find pipeline delay based table pointer which matches delay
96 * @tbl : Pointer to list of request table
97 * @delay : Pipeline delay value to be searched for comparison
98 *
99 * @return : pointer to request table for matching pipeline delay table.
100 *
101 */
102static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
103 struct cam_req_mgr_req_tbl *tbl, int32_t delay)
104{
105 if (!tbl)
106 return NULL;
107
108 do {
109 if (delay != tbl->pd)
110 tbl = tbl->next;
111 else
112 return tbl;
113 } while (tbl != NULL);
114
115 return NULL;
116}
117
/**
 * __cam_req_mgr_inc_idx()
 *
 * @brief   : Advance an index by step, wrapping around at max_val
 * @val     : index to be advanced
 * @step    : amount by which the index moves forward
 * @max_val : wrap-around boundary; result stays in [0, max_val)
 *
 */
static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
{
	int32_t next = *val + step;

	*val = next % max_val;
}
131
/**
 * __cam_req_mgr_dec_idx()
 *
 * @brief   : Move an index back by step, wrapping below zero onto max_val
 * @val     : index to be moved back
 * @step    : amount by which the index moves backward
 * @max_val : wrap-around boundary added back when the index goes negative
 *
 */
static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
{
	int32_t prev = *val - step;

	if (prev < 0)
		prev += max_val;
	*val = prev;
}
147
/**
 * __cam_req_mgr_traverse()
 *
 * @brief : Traverse through pd tables, it will internally cover all linked
 *          pd tables. Each pd table visited will check if idx passed to its
 *          in ready state. If ready means all devices linked to the pd table
 *          have this request id packet ready. Then it calls subsequent pd
 *          tbl with new idx. New idx value takes into account the delta
 *          between current pd table and next one.
 * @traverse_data: contains all the info to traverse through pd tables
 *
 * @return: 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
{
	int rc = 0;
	int32_t next_idx = traverse_data->idx;
	int32_t curr_idx = traverse_data->idx;
	struct cam_req_mgr_req_tbl *tbl;
	struct cam_req_mgr_apply *apply_data;

	if (!traverse_data->tbl || !traverse_data->apply_data) {
		CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
			traverse_data->tbl, traverse_data->apply_data);
		traverse_data->result = 0;
		return -EINVAL;
	}

	tbl = traverse_data->tbl;
	apply_data = traverse_data->apply_data;
	CAM_DBG(CAM_CRM, "Enter pd %d idx %d state %d skip %d status %d",
		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);

	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
		tbl->skip_traverse > 0) {
		if (tbl->next) {
			/* Recurse into the lower-pd table; its slot index is
			 * this idx shifted back by the pd delta between the
			 * two tables.
			 */
			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
				tbl->num_slots);
			traverse_data->idx = next_idx;
			traverse_data->tbl = tbl->next;
			rc = __cam_req_mgr_traverse(traverse_data);
		}
		if (rc >= 0) {
			/* Record what to apply for this pd on success */
			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
			apply_data[tbl->pd].pd = tbl->pd;
			apply_data[tbl->pd].req_id =
				CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
			apply_data[tbl->pd].idx = curr_idx;

			/* If traverse is sucessful decrement traverse skip */
			if (tbl->skip_traverse > 0) {
				/* req_id -1 makes the send step skip this pd */
				apply_data[tbl->pd].req_id = -1;
				tbl->skip_traverse--;
			}
		} else {
			/* linked pd table is not ready for this traverse yet */
			return rc;
		}
	} else {
		/* This pd table is not ready to proceed with asked idx */
		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
		return -EAGAIN;
	}
	return 0;
}
217
/**
 * __cam_req_mgr_in_q_skip_idx()
 *
 * @brief : Mark an input queue slot to be skipped during traverse
 * @in_q  : input queue pointer
 * @idx   : Sets skip_idx bit of the particular slot to true so when traverse
 *          happens for this idx, no req will be submitted for devices
 *          handling this idx.
 *
 */
static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
	int32_t idx)
{
	in_q->slot[idx].req_id = -1;
	in_q->slot[idx].skip_idx = 1;
	/* REQ_ADDED keeps the slot from being treated as empty by CSL */
	in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
	CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
}
236
237/**
238 * __cam_req_mgr_tbl_set_id()
239 *
240 * @brief : Set unique id to table
241 * @tbl : pipeline based table which requires new id
242 * @req : pointer to request data wihch contains num_tables counter
243 *
244 */
245static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
246 struct cam_req_mgr_req_data *req)
247{
248 if (!tbl)
249 return;
250 do {
251 tbl->id = req->num_tbl++;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700252 CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700253 tbl->id, tbl->pd, tbl->skip_traverse,
254 tbl->pd_delta);
255 tbl = tbl->next;
256 } while (tbl != NULL);
257}
258
259/**
260 * __cam_req_mgr_tbl_set_all_skip_cnt()
261 *
262 * @brief : Each pd table sets skip value based on delta between itself and
263 * max pd value. During initial streamon or bubble case this is
264 * used. That way each pd table skips required num of traverse and
265 * align themselve with req mgr connected devs.
266 * @l_tbl : iterates through list of pd tables and sets skip traverse
267 *
268 */
269static void __cam_req_mgr_tbl_set_all_skip_cnt(
270 struct cam_req_mgr_req_tbl **l_tbl)
271{
272 struct cam_req_mgr_req_tbl *tbl = *l_tbl;
273 int32_t max_pd;
274
275 if (!tbl)
276 return;
277
278 max_pd = tbl->pd;
279 do {
280 tbl->skip_traverse = max_pd - tbl->pd;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700281 CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700282 tbl->id, tbl->pd, tbl->skip_traverse,
283 tbl->pd_delta);
284 tbl = tbl->next;
285 } while (tbl != NULL);
286}
287
/**
 * __cam_req_mgr_reset_req_slot()
 *
 * @brief : reset specified idx/slot in input queue as well as all pd tables
 * @link  : link pointer
 * @idx   : slot index which will be reset
 *
 */
static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
	int32_t idx)
{
	struct cam_req_mgr_slot *slot;
	struct cam_req_mgr_req_tbl *tbl = link->req.l_tbl;
	struct cam_req_mgr_req_queue *in_q = link->req.in_q;

	slot = &in_q->slot[idx];
	CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);

	/* Check if CSL has already pushed new request*/
	if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
		return;

	/* Reset input queue slot */
	slot->req_id = -1;
	slot->skip_idx = 0;
	slot->recover = 0;
	slot->status = CRM_SLOT_STATUS_NO_REQ;

	/* Reset all pd table slot */
	while (tbl != NULL) {
		CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
			tbl->pd, idx, tbl->slot[idx].state);
		tbl->slot[idx].req_ready_map = 0;
		tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
		tbl = tbl->next;
	}
}
325
326/**
327 * __cam_req_mgr_check_next_req_slot()
328 *
329 * @brief : While streaming if input queue does not contain any pending
330 * request, req mgr still needs to submit pending request ids to
331 * devices with lower pipeline delay value.
332 * @in_q : Pointer to input queue where req mgr wil peep into
333 *
334 */
335static void __cam_req_mgr_check_next_req_slot(
336 struct cam_req_mgr_req_queue *in_q)
337{
338 int32_t idx = in_q->rd_idx;
339 struct cam_req_mgr_slot *slot;
340
341 __cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
342 slot = &in_q->slot[idx];
343
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700344 CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
Sagar Gored79f95e2017-03-14 18:32:17 -0700345
346 /* Check if there is new req from CSL, if not complete req */
347 if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
348 __cam_req_mgr_in_q_skip_idx(in_q, idx);
349 if (in_q->wr_idx != idx)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700350 CAM_WARN(CAM_CRM,
351 "CHECK here wr %d, rd %d", in_q->wr_idx, idx);
Sagar Gored79f95e2017-03-14 18:32:17 -0700352 __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
353 }
354}
355
/**
 * __cam_req_mgr_send_req()
 *
 * @brief   : send request id to be applied to each device connected on link
 * @link    : pointer to link whose input queue and req tbl are
 *            traversed through
 * @in_q    : pointer to input request queue
 * @trigger : trigger point (SOF/EOF) for this apply; devices that did not
 *            subscribe to it are skipped
 *
 * @return  : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_req_queue *in_q, uint32_t trigger)
{
	int rc = 0, pd, i, idx;
	struct cam_req_mgr_connected_device *dev = NULL;
	struct cam_req_mgr_apply_request apply_req;
	struct cam_req_mgr_link_evt_data evt_data;

	apply_req.link_hdl = link->link_hdl;
	apply_req.report_if_bubble = 0;

	for (i = 0; i < link->num_devs; i++) {
		dev = &link->l_dev[i];
		if (dev) {
			pd = dev->dev_info.p_delay;
			if (pd >= CAM_PIPELINE_DELAY_MAX) {
				CAM_WARN(CAM_CRM, "pd %d greater than max",
					pd);
				continue;
			}
			/* Nothing staged for this pd: skip slot or no req */
			if (link->req.apply_data[pd].skip_idx ||
				link->req.apply_data[pd].req_id < 0) {
				CAM_DBG(CAM_CRM, "skip %d req_id %lld",
					link->req.apply_data[pd].skip_idx,
					link->req.apply_data[pd].req_id);
				continue;
			}
			/* Device not subscribed to this trigger point */
			if (!(dev->dev_info.trigger & trigger))
				continue;

			apply_req.dev_hdl = dev->dev_hdl;
			apply_req.request_id =
				link->req.apply_data[pd].req_id;
			idx = link->req.apply_data[pd].idx;
			apply_req.report_if_bubble =
				in_q->slot[idx].recover;

			trace_cam_req_mgr_apply_request(link, &apply_req, dev);

			apply_req.trigger_point = trigger;
			CAM_DBG(CAM_CRM, "SEND: pd %d req_id %lld",
				pd, apply_req.request_id);
			if (dev->ops && dev->ops->apply_req) {
				rc = dev->ops->apply_req(&apply_req);
				if (rc < 0)
					break;
			}
		}
	}
	if (rc < 0) {
		CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
			dev->dev_info.p_delay, apply_req.request_id);
		/* Apply req failed notify already applied devs */
		/*
		 * NOTE(review): this walks from the failing device index back
		 * to 0 and notifies every device in that range, including
		 * ones that were skipped above - confirm that is intended.
		 */
		for (; i >= 0; i--) {
			dev = &link->l_dev[i];
			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
			evt_data.link_hdl = link->link_hdl;
			evt_data.req_id = apply_req.request_id;
			evt_data.u.error = CRM_KMD_ERR_BUBBLE;
			if (dev->ops && dev->ops->process_evt)
				dev->ops->process_evt(&evt_data);
		}
	}
	return rc;
}
432
/**
 * __cam_req_mgr_check_link_is_ready()
 *
 * @brief : traverse through all request tables and see if all devices are
 *          ready to apply request settings.
 * @link  : pointer to link whose input queue and req tbl are
 *          traversed through
 * @idx   : index within input request queue
 *
 * @return : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
	int32_t idx)
{
	int rc;
	struct cam_req_mgr_traverse traverse_data;
	struct cam_req_mgr_req_queue *in_q;
	struct cam_req_mgr_apply *apply_data;

	in_q = link->req.in_q;

	/* Clear per-pd apply data before the traverse refills it */
	apply_data = link->req.apply_data;
	memset(apply_data, 0,
		sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);

	traverse_data.apply_data = apply_data;
	traverse_data.idx = idx;
	traverse_data.tbl = link->req.l_tbl;
	traverse_data.in_q = in_q;
	traverse_data.result = 0;
	/*
	 * Traverse through all pd tables, if result is success,
	 * apply the settings
	 */

	rc = __cam_req_mgr_traverse(&traverse_data);
	CAM_DBG(CAM_CRM, "SOF: idx %d result %x pd_mask %x rc %d",
		idx, traverse_data.result, link->pd_mask, rc);

	/* Ready only when every pd present on the link reported success */
	if (!rc && traverse_data.result == link->pd_mask) {
		CAM_DBG(CAM_CRM,
			"APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
			link->link_hdl, idx,
			apply_data[2].req_id, apply_data[1].req_id,
			apply_data[0].req_id);
	} else
		rc = -EAGAIN;

	return rc;
}
484
485/**
486 * __cam_req_mgr_process_req()
487 *
488 * @brief : processes read index in request queue and traverse through table
489 * @link : pointer to link whose input queue and req tbl are
490 * traversed through
491 *
492 * @return : 0 for success, negative for failure
493 *
494 */
Junzhe Zou2df84502017-05-26 13:20:23 -0700495static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
496 uint32_t trigger)
Sagar Gored79f95e2017-03-14 18:32:17 -0700497{
498 int rc = 0, idx;
499 struct cam_req_mgr_slot *slot = NULL;
500 struct cam_req_mgr_req_queue *in_q;
501 struct cam_req_mgr_core_session *session;
502
503 in_q = link->req.in_q;
504 session = (struct cam_req_mgr_core_session *)link->parent;
505
506 /*
Junzhe Zou2df84502017-05-26 13:20:23 -0700507 * Check if new read index,
Sagar Gored79f95e2017-03-14 18:32:17 -0700508 * - if in pending state, traverse again to complete
509 * transaction of this read index.
510 * - if in applied_state, somthign wrong.
511 * - if in no_req state, no new req
512 */
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700513 CAM_DBG(CAM_CRM, "idx %d req_status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700514 in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
515
516 slot = &in_q->slot[in_q->rd_idx];
517 if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700518 CAM_DBG(CAM_CRM, "No Pending req");
Sagar Gored79f95e2017-03-14 18:32:17 -0700519 return 0;
520 }
521
Junzhe Zou2df84502017-05-26 13:20:23 -0700522 if (trigger != CAM_TRIGGER_POINT_SOF &&
523 trigger != CAM_TRIGGER_POINT_EOF)
524 return rc;
Sagar Gored79f95e2017-03-14 18:32:17 -0700525
Junzhe Zou2df84502017-05-26 13:20:23 -0700526 if (trigger == CAM_TRIGGER_POINT_SOF) {
Junzhe Zou3f77d832017-08-25 14:55:23 -0700527 if (link->trigger_mask) {
Junzhe Zou2df84502017-05-26 13:20:23 -0700528 CAM_ERR(CAM_CRM, "Applying for last EOF fails");
529 return -EINVAL;
530 }
531 rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
532 if (rc < 0) {
533
534 /* If traverse result is not success, then some devices
535 * are not ready with packet for the asked request id,
536 * hence try again in next sof
537 */
538 slot->status = CRM_SLOT_STATUS_REQ_PENDING;
Sagar Gored79f95e2017-03-14 18:32:17 -0700539 if (link->state == CAM_CRM_LINK_STATE_ERR) {
Junzhe Zou2df84502017-05-26 13:20:23 -0700540 /*
541 * During error recovery all tables should be
542 * ready, don't expect to enter here.
543 * @TODO: gracefully handle if recovery fails.
544 */
545 CAM_ERR(CAM_CRM,
546 "FATAL recovery cant finish idx %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700547 in_q->rd_idx,
548 in_q->slot[in_q->rd_idx].status);
Junzhe Zou2df84502017-05-26 13:20:23 -0700549 rc = -EPERM;
Sagar Gored79f95e2017-03-14 18:32:17 -0700550 }
Junzhe Zou2df84502017-05-26 13:20:23 -0700551 return rc;
552 }
553 }
554 if (trigger == CAM_TRIGGER_POINT_EOF &&
555 (!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
556 CAM_ERR(CAM_CRM, "Applying for last SOF fails");
557 return -EINVAL;
558 }
Sagar Gored79f95e2017-03-14 18:32:17 -0700559
Junzhe Zou2df84502017-05-26 13:20:23 -0700560 rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger);
561 if (rc < 0) {
562 /* Apply req failed retry at next sof */
563 slot->status = CRM_SLOT_STATUS_REQ_PENDING;
564 } else {
565 link->trigger_mask |= trigger;
566
567 if (link->state == CAM_CRM_LINK_STATE_ERR) {
568 CAM_WARN(CAM_CRM, "Err recovery done idx %d",
569 in_q->rd_idx);
570 mutex_lock(&link->lock);
571 link->state = CAM_CRM_LINK_STATE_READY;
572 mutex_unlock(&link->lock);
573 }
574 if (link->trigger_mask == link->subscribe_event) {
575 slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
576 link->trigger_mask = 0;
577 CAM_DBG(CAM_CRM, "req is applied\n");
Sagar Gored79f95e2017-03-14 18:32:17 -0700578
Sagar Gored79f95e2017-03-14 18:32:17 -0700579 idx = in_q->rd_idx;
Junzhe Zou2df84502017-05-26 13:20:23 -0700580 __cam_req_mgr_dec_idx(
581 &idx, link->max_delay + 1,
Sagar Gored79f95e2017-03-14 18:32:17 -0700582 in_q->num_slots);
583 __cam_req_mgr_reset_req_slot(link, idx);
584 }
Sagar Gored79f95e2017-03-14 18:32:17 -0700585 }
586
587 return rc;
588}
589
/**
 * __cam_req_mgr_add_tbl_to_link()
 *
 * @brief   : Add table to list under link sorted by pd decrementing order
 * @l_tbl   : list of pipeline delay tables.
 * @new_tbl : new tbl which will be appended to above list as per its pd value
 *
 */
static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
	struct cam_req_mgr_req_tbl *new_tbl)
{
	struct cam_req_mgr_req_tbl *tbl;

	/* New table becomes the head when the list is empty or its pd is
	 * the new maximum.
	 */
	if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
		new_tbl->next = *l_tbl;
		if (*l_tbl) {
			new_tbl->pd_delta =
				new_tbl->pd - (*l_tbl)->pd;
		}
		*l_tbl = new_tbl;
	} else {
		tbl = *l_tbl;

		/* Reach existing tbl which has less pd value */
		while (tbl->next != NULL &&
			new_tbl->pd < tbl->next->pd) {
			tbl = tbl->next;
		}
		if (tbl->next != NULL) {
			new_tbl->pd_delta =
				new_tbl->pd - tbl->next->pd;
		} else {
			/* This is last table in linked list*/
			new_tbl->pd_delta = 0;
		}
		/* Splice new table between tbl and tbl->next, and refresh
		 * the predecessor's delta to point at the new table.
		 */
		new_tbl->next = tbl->next;
		tbl->next = new_tbl;
		tbl->pd_delta = tbl->pd - new_tbl->pd;
	}
	CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
		new_tbl->pd_delta);
}
632
633/**
634 * __cam_req_mgr_create_pd_tbl()
635 *
636 * @brief : Creates new request table for new delay value
637 * @delay : New pd table allocated will have this delay value
638 *
639 * @return : pointer to newly allocated table, NULL for failure
640 *
641 */
642static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
643{
644 struct cam_req_mgr_req_tbl *tbl =
645 kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
646 if (tbl != NULL) {
647 tbl->num_slots = MAX_REQ_SLOTS;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700648 CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots);
Sagar Gored79f95e2017-03-14 18:32:17 -0700649 }
650
651 return tbl;
652}
653
654/**
655 * __cam_req_mgr_destroy_all_tbl()
656 *
657 * @brief : This func will destroy all pipeline delay based req table structs
658 * @l_tbl : pointer to first table in list and it has max pd .
659 *
660 */
661static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
662{
663 struct cam_req_mgr_req_tbl *tbl = *l_tbl, *temp;
664
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700665 CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl);
Sagar Gored79f95e2017-03-14 18:32:17 -0700666 while (tbl != NULL) {
667 temp = tbl->next;
668 kfree(tbl);
669 tbl = temp;
670 }
671 *l_tbl = NULL;
672}
673
674/**
675 * __cam_req_mgr_find_slot_for_req()
676 *
677 * @brief : Find idx from input queue at which req id is enqueued
678 * @in_q : input request queue pointer
679 * @req_id : request id which needs to be searched in input queue
680 *
681 * @return : slot index where passed request id is stored, -1 for failure
682 *
683 */
684static int32_t __cam_req_mgr_find_slot_for_req(
685 struct cam_req_mgr_req_queue *in_q, int64_t req_id)
686{
687 int32_t idx, i;
688 struct cam_req_mgr_slot *slot;
689
690 idx = in_q->wr_idx;
691 for (i = 0; i < in_q->num_slots; i++) {
692 slot = &in_q->slot[idx];
693 if (slot->req_id == req_id) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700694 CAM_DBG(CAM_CRM, "req %lld found at %d %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700695 req_id, idx, slot->idx,
696 slot->status);
697 break;
698 }
699 __cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
700 }
701 if (i >= in_q->num_slots)
702 idx = -1;
703
704 return idx;
705}
706
707/**
708 * __cam_req_mgr_setup_in_q()
709 *
710 * @brief : Initialize req table data
711 * @req : request data pointer
712 *
713 * @return: 0 for success, negative for failure
714 *
715 */
716static int __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
717{
718 int i;
719 struct cam_req_mgr_req_queue *in_q = req->in_q;
720
721 if (!in_q) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700722 CAM_ERR(CAM_CRM, "NULL in_q");
Sagar Gored79f95e2017-03-14 18:32:17 -0700723 return -EINVAL;
724 }
725
726 mutex_lock(&req->lock);
727 in_q->num_slots = MAX_REQ_SLOTS;
728
729 for (i = 0; i < in_q->num_slots; i++) {
730 in_q->slot[i].idx = i;
731 in_q->slot[i].req_id = -1;
732 in_q->slot[i].skip_idx = 0;
733 in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ;
734 }
735
736 in_q->wr_idx = 0;
737 in_q->rd_idx = 0;
738 mutex_unlock(&req->lock);
739
740 return 0;
741}
742
/**
 * __cam_req_mgr_reset_in_q()
 *
 * @brief : Reset the input queue to an empty state: zero all slots, drop
 *          the slot count, rewind read/write indices
 * @req   : request data owning the input queue
 *
 * @return: 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
{
	struct cam_req_mgr_req_queue *in_q = req->in_q;

	if (!in_q) {
		CAM_ERR(CAM_CRM, "NULL in_q");
		return -EINVAL;
	}

	mutex_lock(&req->lock);
	memset(in_q->slot, 0,
		sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
	in_q->num_slots = 0;

	in_q->wr_idx = 0;
	in_q->rd_idx = 0;
	mutex_unlock(&req->lock);

	return 0;
}
772
/**
 * __cam_req_mgr_sof_freeze()
 *
 * @brief : Apoptosis - Handles case when connected devices are not responding
 * @data  : timer callback argument, cast back to the crm timer pointer
 *
 */
static void __cam_req_mgr_sof_freeze(unsigned long data)
{
	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
	struct cam_req_mgr_core_link *link = NULL;

	if (!timer) {
		CAM_ERR(CAM_CRM, "NULL timer");
		return;
	}
	/* NOTE(review): timer->parent is dereferenced without a NULL check -
	 * confirm the timer is always armed with its parent link set.
	 */
	link = (struct cam_req_mgr_core_link *)timer->parent;
	CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
}
792
793/**
794 * __cam_req_mgr_create_subdevs()
795 *
796 * @brief : Create new crm subdev to link with realtime devices
797 * @l_dev : list of subdevs internal to crm
798 * @num_dev : num of subdevs to be created for link
799 *
800 * @return : pointer to allocated list of devices
801 */
802static int __cam_req_mgr_create_subdevs(
803 struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
804{
805 int rc = 0;
806 *l_dev = (struct cam_req_mgr_connected_device *)
807 kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
808 GFP_KERNEL);
809 if (!*l_dev)
810 rc = -ENOMEM;
811
812 return rc;
813}
814
/**
 * __cam_req_mgr_destroy_subdev()
 *
 * @brief    : Cleans up the subdevs allocated by crm for link
 * @l_device : pointer to the array of subdevs crm created; caller must not
 *             use its copy of the pointer afterwards
 *
 */
static void __cam_req_mgr_destroy_subdev(
	struct cam_req_mgr_connected_device *l_device)
{
	/*
	 * Fix: the original assigned l_device = NULL after the free, but a
	 * write to a by-value parameter never reaches the caller, so the
	 * statement only suggested a protection that does not exist.
	 */
	kfree(l_device);
}
828
/**
 * __cam_req_mgr_destroy_link_info()
 *
 * @brief : Cleans up the mem allocated while linking
 * @link  : pointer to link, mem associated with this link is freed
 *
 */
static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
{
	int32_t i = 0;
	struct cam_req_mgr_connected_device *dev;
	struct cam_req_mgr_core_dev_link_setup link_data;

	mutex_lock(&link->lock);

	/* link_enable = 0 tells each device this is an unlink request */
	link_data.link_enable = 0;
	link_data.link_hdl = link->link_hdl;
	link_data.crm_cb = NULL;
	link_data.subscribe_event = 0;

	/* Using device ops unlink devices */
	for (i = 0; i < link->num_devs; i++) {
		dev = &link->l_dev[i];
		if (dev != NULL) {
			link_data.dev_hdl = dev->dev_hdl;
			if (dev->ops && dev->ops->link_setup)
				dev->ops->link_setup(&link_data);
			dev->dev_hdl = 0;
			dev->parent = NULL;
			dev->ops = NULL;
		}
	}
	/* Release all pd tables and reset this link's input queue */
	__cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
	__cam_req_mgr_reset_in_q(&link->req);
	link->req.num_tbl = 0;
	mutex_destroy(&link->req.lock);

	link->pd_mask = 0;
	link->num_devs = 0;
	link->max_delay = 0;

	mutex_unlock(&link->lock);
}
872
/**
 * __cam_req_mgr_reserve_link()
 *
 * @brief: Reserves one link data struct within session
 * @session: session identifier
 *
 * @return: pointer to link reserved, NULL on failure or when the session
 *          already holds MAX_LINKS_PER_SESSION links
 *
 */
static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
	struct cam_req_mgr_core_session *session)
{
	struct cam_req_mgr_core_link *link;
	struct cam_req_mgr_req_queue *in_q;

	if (!session || !g_crm_core_dev) {
		CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
		return NULL;
	}

	if (session->num_links >= MAX_LINKS_PER_SESSION) {
		CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
			session->num_links, MAX_LINKS_PER_SESSION);
		return NULL;
	}

	link = (struct cam_req_mgr_core_link *)
		kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
	if (!link) {
		CAM_ERR(CAM_CRM, "failed to create link, no mem");
		return NULL;
	}
	/* NOTE(review): the session's single embedded in_q is handed to this
	 * link - confirm only one link per session uses it at a time.
	 */
	in_q = &session->in_q;
	mutex_init(&link->lock);

	mutex_lock(&link->lock);
	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
	link->num_devs = 0;
	link->max_delay = 0;
	memset(in_q->slot, 0,
		sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
	link->req.in_q = in_q;
	in_q->num_slots = 0;
	link->state = CAM_CRM_LINK_STATE_IDLE;
	link->parent = (void *)session;
	mutex_unlock(&link->lock);

	/* Publish the new link in the session's link array */
	mutex_lock(&session->lock);
	session->links[session->num_links] = link;
	session->num_links++;
	CAM_DBG(CAM_CRM, "Active session links (%d)",
		session->num_links);
	mutex_unlock(&session->lock);

	return link;
}
929
930/**
Sagar Gored79f95e2017-03-14 18:32:17 -0700931 * __cam_req_mgr_reserve_link()
932 *
933 * @brief : Reserves one link data struct within session
934 * @session: session identifier
935 * @link : link identifier
936 *
937 */
938static void __cam_req_mgr_unreserve_link(
939 struct cam_req_mgr_core_session *session,
940 struct cam_req_mgr_core_link **link)
941{
942 int32_t i = 0;
943
944 if (!session || !*link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700945 CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
Sagar Gored79f95e2017-03-14 18:32:17 -0700946 session, *link);
947 return;
948 }
949
950 mutex_lock(&session->lock);
951 if (!session->num_links)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700952 CAM_WARN(CAM_CRM, "No active link or invalid state %d",
Sagar Gored79f95e2017-03-14 18:32:17 -0700953 session->num_links);
954 else {
955 for (i = 0; i < session->num_links; i++) {
956 if (session->links[i] == *link)
957 session->links[i] = NULL;
958 }
959 session->num_links--;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700960 CAM_DBG(CAM_CRM, "Active session links (%d)",
Sagar Gored79f95e2017-03-14 18:32:17 -0700961 session->num_links);
962 }
963 kfree(*link);
964 *link = NULL;
965 mutex_unlock(&session->lock);
966
967}
968
969/* Workqueue context processing section */
970
971/**
972 * cam_req_mgr_process_send_req()
973 *
974 * @brief: This runs in workque thread context. Call core funcs to send
975 * apply request id to drivers.
976 * @priv : link information.
977 * @data : contains information about frame_id, link etc.
978 *
979 * @return: 0 on success.
980 */
981int cam_req_mgr_process_send_req(void *priv, void *data)
982{
983 int rc = 0;
984 struct cam_req_mgr_core_link *link = NULL;
985 struct cam_req_mgr_send_request *send_req = NULL;
986 struct cam_req_mgr_req_queue *in_q = NULL;
987
988 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700989 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -0700990 rc = -EINVAL;
991 goto end;
992 }
993 link = (struct cam_req_mgr_core_link *)priv;
994 send_req = (struct cam_req_mgr_send_request *)data;
995 in_q = send_req->in_q;
996
Junzhe Zou2df84502017-05-26 13:20:23 -0700997 rc = __cam_req_mgr_send_req(link, in_q, CAM_TRIGGER_POINT_SOF);
Sagar Gored79f95e2017-03-14 18:32:17 -0700998end:
999 return rc;
1000}
1001
1002/**
1003 * cam_req_mgr_process_flush_req()
1004 *
1005 * @brief: This runs in workque thread context. Call core funcs to check
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001006 * which requests need to be removed/cancelled.
Sagar Gored79f95e2017-03-14 18:32:17 -07001007 * @priv : link information.
1008 * @data : contains information about frame_id, link etc.
1009 *
1010 * @return: 0 on success.
1011 */
int cam_req_mgr_process_flush_req(void *priv, void *data)
{
	int rc = 0, i = 0, idx = -1;
	struct cam_req_mgr_flush_info *flush_info = NULL;
	struct cam_req_mgr_core_link *link = NULL;
	struct cam_req_mgr_req_queue *in_q = NULL;
	struct cam_req_mgr_slot *slot = NULL;
	struct cam_req_mgr_connected_device *device = NULL;
	struct cam_req_mgr_flush_request flush_req;
	struct crm_task_payload *task_data = NULL;

	if (!data || !priv) {
		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
		rc = -EINVAL;
		goto end;
	}
	/* priv carries the link; data carries the task payload union. */
	link = (struct cam_req_mgr_core_link *)priv;
	task_data = (struct crm_task_payload *)data;
	flush_info = (struct cam_req_mgr_flush_info *)&task_data->u;
	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld type %d",
		flush_info->link_hdl,
		flush_info->req_id,
		flush_info->flush_type);

	in_q = link->req.in_q;

	trace_cam_flush_req(link, flush_info);

	mutex_lock(&link->req.lock);
	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		/* Flush ALL: invalidate every slot and rewind both indices. */
		for (i = 0; i < in_q->num_slots; i++) {
			slot = &in_q->slot[i];
			slot->req_id = -1;
			slot->skip_idx = 1;
			slot->status = CRM_SLOT_STATUS_NO_REQ;
		}
		in_q->wr_idx = 0;
		in_q->rd_idx = 0;
	} else if (flush_info->flush_type ==
		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
		/* Cancel a single request, identified by req_id. */
		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
		if (idx < 0) {
			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
			flush_info->req_id);
		} else {
			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
				flush_info->req_id, idx);
			slot = &in_q->slot[idx];
			/*
			 * A request already handed to (or pending at) the
			 * devices cannot be cancelled anymore.
			 *
			 * NOTE(review): this early return skips the
			 * complete(&link->workq_comp) below — confirm the
			 * flush ioctl caller does not block waiting on it.
			 */
			if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
				slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
				CAM_WARN(CAM_CRM,
					"req_id %lld can not be cancelled",
					flush_info->req_id);
				mutex_unlock(&link->req.lock);
				return -EINVAL;
			}
			__cam_req_mgr_in_q_skip_idx(in_q, idx);
		}
	}

	/* Propagate the flush to every device connected on this link. */
	for (i = 0; i < link->num_devs; i++) {
		device = &link->l_dev[i];
		flush_req.link_hdl = flush_info->link_hdl;
		flush_req.dev_hdl = device->dev_hdl;
		flush_req.req_id = flush_info->req_id;
		flush_req.type = flush_info->flush_type;
		/* @TODO: error return handling from drivers */
		if (device->ops && device->ops->flush_req)
			rc = device->ops->flush_req(&flush_req);
	}
	mutex_unlock(&link->req.lock);

	/* Wake the ioctl thread waiting for the flush to finish. */
	complete(&link->workq_comp);
end:
	return rc;
}
1088
1089/**
1090 * cam_req_mgr_process_sched_req()
1091 *
1092 * @brief: This runs in workque thread context. Call core funcs to check
1093 * which peding requests can be processed.
1094 * @priv : link information.
1095 * @data : contains information about frame_id, link etc.
1096 *
1097 * @return: 0 on success.
1098 */
1099int cam_req_mgr_process_sched_req(void *priv, void *data)
1100{
1101 int rc = 0;
1102 struct cam_req_mgr_sched_request *sched_req = NULL;
1103 struct cam_req_mgr_core_link *link = NULL;
1104 struct cam_req_mgr_req_queue *in_q = NULL;
1105 struct cam_req_mgr_slot *slot = NULL;
1106 struct crm_task_payload *task_data = NULL;
1107
1108 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001109 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001110 rc = -EINVAL;
1111 goto end;
1112 }
1113 link = (struct cam_req_mgr_core_link *)priv;
1114 task_data = (struct crm_task_payload *)data;
1115 sched_req = (struct cam_req_mgr_sched_request *)&task_data->u;
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001116 CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -07001117 sched_req->link_hdl,
1118 sched_req->req_id);
1119
1120 in_q = link->req.in_q;
1121
1122 mutex_lock(&link->req.lock);
1123 slot = &in_q->slot[in_q->wr_idx];
1124
1125 if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
Sagar Goreb56c81e2017-05-08 17:15:47 -07001126 slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001127 CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
Sagar Goreb56c81e2017-05-08 17:15:47 -07001128
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001129 CAM_DBG(CAM_CRM, "sched_req %lld at slot %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001130 sched_req->req_id, in_q->wr_idx);
1131
1132 slot->status = CRM_SLOT_STATUS_REQ_ADDED;
1133 slot->req_id = sched_req->req_id;
1134 slot->skip_idx = 0;
1135 slot->recover = sched_req->bubble_enable;
1136 __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
1137 mutex_unlock(&link->req.lock);
1138
Sagar Gored79f95e2017-03-14 18:32:17 -07001139end:
1140 return rc;
1141}
1142
1143/**
1144 * cam_req_mgr_process_add_req()
1145 *
1146 * @brief: This runs in workque thread context. Call core funcs to check
1147 * which peding requests can be processed.
1148 * @priv : link information.
1149 * @data : contains information about frame_id, link etc.
1150 *
1151 * @return: 0 on success.
1152 */
int cam_req_mgr_process_add_req(void *priv, void *data)
{
	int rc = 0, i = 0, idx;
	struct cam_req_mgr_add_request *add_req = NULL;
	struct cam_req_mgr_core_link *link = NULL;
	struct cam_req_mgr_connected_device *device = NULL;
	struct cam_req_mgr_req_tbl *tbl = NULL;
	struct cam_req_mgr_tbl_slot *slot = NULL;
	struct crm_task_payload *task_data = NULL;

	if (!data || !priv) {
		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
		rc = -EINVAL;
		goto end;
	}

	link = (struct cam_req_mgr_core_link *)priv;
	task_data = (struct crm_task_payload *)data;
	add_req = (struct cam_req_mgr_add_request *)&task_data->u;

	/* Map the reporting device handle to its pipeline-delay table. */
	for (i = 0; i < link->num_devs; i++) {
		device = &link->l_dev[i];
		if (device->dev_hdl == add_req->dev_hdl) {
			tbl = device->pd_tbl;
			break;
		}
	}
	if (!tbl) {
		/*
		 * NOTE(review): log reads l_dev[1] unconditionally — assumes
		 * at least two entries are valid; confirm for 1-device links.
		 */
		CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
			add_req->dev_hdl,
			link->l_dev[0].dev_hdl,
			link->l_dev[1].dev_hdl);
		rc = -EINVAL;
		goto end;
	}
	/*
	 * Go through request table and add
	 * request id to proper table
	 * 1. find req slot in in_q matching req_id.sent by dev
	 * 2. goto table of this device based on p_delay
	 * 3. mark req_ready_map with this dev_bit.
	 */

	mutex_lock(&link->req.lock);
	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
	if (idx < 0) {
		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
		rc = -EBADSLT;
		mutex_unlock(&link->req.lock);
		goto end;
	}
	/* Table slot index mirrors the input-queue slot index. */
	slot = &tbl->slot[idx];
	if (slot->state != CRM_REQ_STATE_PENDING &&
		slot->state != CRM_REQ_STATE_EMPTY) {
		CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
			slot->state, idx, slot->req_ready_map);
	}

	/* Mark this device's bit; slot stays PENDING until all report in. */
	slot->state = CRM_REQ_STATE_PENDING;
	slot->req_ready_map |= (1 << device->dev_bit);

	CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
		slot->req_ready_map);

	trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);

	/* When every device on this pd table has reported, slot is READY. */
	if (slot->req_ready_map == tbl->dev_mask) {
		CAM_DBG(CAM_CRM, "idx %d req_id %lld pd %d SLOT READY",
			idx, add_req->req_id, tbl->pd);
		slot->state = CRM_REQ_STATE_READY;
	}
	mutex_unlock(&link->req.lock);

end:
	return rc;
}
1230
1231/**
1232 * cam_req_mgr_process_error()
1233 *
1234 * @brief: This runs in workque thread context. bubble /err recovery.
1235 * @priv : link information.
1236 * @data : contains information about frame_id, link etc.
1237 *
1238 * @return: 0 on success.
1239 */
int cam_req_mgr_process_error(void *priv, void *data)
{
	int rc = 0, idx = -1, i;
	struct cam_req_mgr_error_notify *err_info = NULL;
	struct cam_req_mgr_core_link *link = NULL;
	struct cam_req_mgr_req_queue *in_q = NULL;
	struct cam_req_mgr_slot *slot = NULL;
	struct cam_req_mgr_connected_device *device = NULL;
	struct cam_req_mgr_link_evt_data evt_data;
	struct crm_task_payload *task_data = NULL;

	if (!data || !priv) {
		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
		rc = -EINVAL;
		goto end;
	}
	link = (struct cam_req_mgr_core_link *)priv;
	task_data = (struct crm_task_payload *)data;
	err_info = (struct cam_req_mgr_error_notify *)&task_data->u;
	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
		err_info->link_hdl,
		err_info->req_id,
		err_info->error);

	in_q = link->req.in_q;

	mutex_lock(&link->req.lock);
	/* Only bubble errors are recoverable here; others are ignored. */
	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
		if (idx < 0) {
			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
				err_info->req_id);
		} else {
			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
				err_info->req_id, idx);
			slot = &in_q->slot[idx];
			/* Recovery must have been enabled at schedule time. */
			if (!slot->recover) {
				CAM_WARN(CAM_CRM,
					"err recovery disabled req_id %lld",
					err_info->req_id);
				mutex_unlock(&link->req.lock);
				return 0;
			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
				&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
				CAM_WARN(CAM_CRM,
					"req_id %lld can not be recovered %d",
					err_info->req_id, slot->status);
				mutex_unlock(&link->req.lock);
				return -EINVAL;
			}
			/* Notify all devices in the link about error */
			for (i = 0; i < link->num_devs; i++) {
				device = &link->l_dev[i];
				if (device != NULL) {
					evt_data.dev_hdl = device->dev_hdl;
					evt_data.evt_type =
						CAM_REQ_MGR_LINK_EVT_ERR;
					evt_data.link_hdl = link->link_hdl;
					evt_data.req_id = err_info->req_id;
					evt_data.u.error = err_info->error;
					if (device->ops &&
						device->ops->process_evt)
						rc = device->ops->
							process_evt(&evt_data);
				}
			}
			/* Bring processing pointer to bubbled req id */
			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
			in_q->rd_idx = idx;
			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
			/* Link enters ERR state until the request re-applies. */
			mutex_lock(&link->lock);
			link->state = CAM_CRM_LINK_STATE_ERR;
			mutex_unlock(&link->lock);
		}
	}
	mutex_unlock(&link->req.lock);

end:
	return rc;
}
1320
1321/**
Junzhe Zou2df84502017-05-26 13:20:23 -07001322 * cam_req_mgr_process_trigger()
Sagar Gore8d91a622017-02-23 14:57:18 -08001323 *
1324 * @brief: This runs in workque thread context. Call core funcs to check
Sagar Gored79f95e2017-03-14 18:32:17 -07001325 * which peding requests can be processed.
1326 * @priv : link information.
1327 * @data : contains information about frame_id, link etc.
Sagar Gore8d91a622017-02-23 14:57:18 -08001328 *
Sagar Gored79f95e2017-03-14 18:32:17 -07001329 * @return: 0 on success.
Sagar Gore8d91a622017-02-23 14:57:18 -08001330 */
Junzhe Zou2df84502017-05-26 13:20:23 -07001331static int cam_req_mgr_process_trigger(void *priv, void *data)
Sagar Gore8d91a622017-02-23 14:57:18 -08001332{
Sagar Gored79f95e2017-03-14 18:32:17 -07001333 int rc = 0;
Junzhe Zou2df84502017-05-26 13:20:23 -07001334 struct cam_req_mgr_trigger_notify *trigger_data = NULL;
Sagar Gored79f95e2017-03-14 18:32:17 -07001335 struct cam_req_mgr_core_link *link = NULL;
1336 struct cam_req_mgr_req_queue *in_q = NULL;
1337 struct crm_task_payload *task_data = NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001338
1339 if (!data || !priv) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001340 CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
Sagar Gored79f95e2017-03-14 18:32:17 -07001341 rc = -EINVAL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001342 goto end;
1343 }
1344 link = (struct cam_req_mgr_core_link *)priv;
Sagar Gored79f95e2017-03-14 18:32:17 -07001345 task_data = (struct crm_task_payload *)data;
Junzhe Zou2df84502017-05-26 13:20:23 -07001346 trigger_data = (struct cam_req_mgr_trigger_notify *)&task_data->u;
Sagar Gore8d91a622017-02-23 14:57:18 -08001347
Junzhe Zou2df84502017-05-26 13:20:23 -07001348 CAM_DBG(CAM_CRM, "link_hdl %x frame_id %lld, trigger %x\n",
1349 trigger_data->link_hdl,
1350 trigger_data->frame_id,
1351 trigger_data->trigger);
Sagar Gore8d91a622017-02-23 14:57:18 -08001352
Sagar Gored79f95e2017-03-14 18:32:17 -07001353 in_q = link->req.in_q;
1354
1355 mutex_lock(&link->req.lock);
1356 /*
1357 * Check if current read index is in applied state, if yes make it free
1358 * and increment read index to next slot.
Sagar Gore8d91a622017-02-23 14:57:18 -08001359 */
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001360 CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001361 link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
Sagar Gore8d91a622017-02-23 14:57:18 -08001362
Sagar Gored79f95e2017-03-14 18:32:17 -07001363 if (link->state == CAM_CRM_LINK_STATE_ERR)
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001364 CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001365 in_q->rd_idx,
1366 in_q->slot[in_q->rd_idx].status);
Sagar Gore8d91a622017-02-23 14:57:18 -08001367
Sagar Gored79f95e2017-03-14 18:32:17 -07001368 if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
1369 /*
1370 * Do NOT reset req q slot data here, it can not be done
1371 * here because we need to preserve the data to handle bubble.
Junzhe Zou2b342222017-08-28 18:15:54 -07001372 *
1373 * Check if any new req is pending in slot, if not finish the
1374 * lower pipeline delay device with available req ids.
Sagar Gored79f95e2017-03-14 18:32:17 -07001375 */
Junzhe Zou2b342222017-08-28 18:15:54 -07001376 __cam_req_mgr_check_next_req_slot(in_q);
Sagar Gored79f95e2017-03-14 18:32:17 -07001377 __cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
Sagar Gore8d91a622017-02-23 14:57:18 -08001378 }
Junzhe Zou2df84502017-05-26 13:20:23 -07001379 rc = __cam_req_mgr_process_req(link, trigger_data->trigger);
Sagar Gored79f95e2017-03-14 18:32:17 -07001380 mutex_unlock(&link->req.lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001381
1382end:
Sagar Gored79f95e2017-03-14 18:32:17 -07001383 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001384}
1385
Sagar Gore8d91a622017-02-23 14:57:18 -08001386
Sagar Gored79f95e2017-03-14 18:32:17 -07001387/* Linked devices' Callback section */
1388
1389/**
1390 * cam_req_mgr_cb_add_req()
1391 *
1392 * @brief : Drivers call this function to notify new packet is available.
1393 * @add_req : Information about new request available at a device.
1394 *
1395 * @return : 0 on success, negative in case of failure
1396 *
1397 */
1398static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
1399{
1400 int rc = 0, idx;
1401 struct crm_workq_task *task = NULL;
1402 struct cam_req_mgr_core_link *link = NULL;
1403 struct cam_req_mgr_add_request *dev_req;
1404 struct crm_task_payload *task_data;
1405
1406 if (!add_req) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001407 CAM_ERR(CAM_CRM, "sof_data is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001408 rc = -EINVAL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001409 goto end;
1410 }
1411
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001412 CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
1413 add_req->dev_hdl, add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001414 link = (struct cam_req_mgr_core_link *)
1415 cam_get_device_priv(add_req->link_hdl);
1416
1417 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001418 CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001419 rc = -EINVAL;
1420 goto end;
1421 }
1422
1423 /* Validate if req id is present in input queue */
1424 idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
1425 if (idx < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001426 CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001427 rc = -ENOENT;
1428 goto end;
1429 }
1430
1431 task = cam_req_mgr_workq_get_task(link->workq);
1432 if (!task) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001433 CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
Sagar Gored79f95e2017-03-14 18:32:17 -07001434 add_req->dev_hdl, add_req->req_id);
1435 rc = -EBUSY;
1436 goto end;
1437 }
1438
1439 task_data = (struct crm_task_payload *)task->payload;
1440 task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
1441 dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
1442 dev_req->req_id = add_req->req_id;
1443 dev_req->link_hdl = add_req->link_hdl;
1444 dev_req->dev_hdl = add_req->dev_hdl;
1445 task->process_cb = &cam_req_mgr_process_add_req;
1446 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001447 CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
1448 add_req->dev_hdl, add_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001449
1450end:
1451 return rc;
1452}
1453
1454/**
1455 * cam_req_mgr_cb_notify_err()
1456 *
1457 * @brief : Error received from device, sends bubble recovery
1458 * @err_info : contains information about error occurred like bubble/overflow
1459 *
1460 * @return : 0 on success, negative in case of failure
1461 *
1462 */
1463static int cam_req_mgr_cb_notify_err(
1464 struct cam_req_mgr_error_notify *err_info)
1465{
1466 int rc = 0;
1467 struct crm_workq_task *task = NULL;
1468 struct cam_req_mgr_core_link *link = NULL;
1469 struct cam_req_mgr_error_notify *notify_err;
1470 struct crm_task_payload *task_data;
1471
1472 if (!err_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001473 CAM_ERR(CAM_CRM, "err_info is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001474 rc = -EINVAL;
1475 goto end;
1476 }
1477
1478 link = (struct cam_req_mgr_core_link *)
1479 cam_get_device_priv(err_info->link_hdl);
1480 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001481 CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001482 rc = -EINVAL;
1483 goto end;
1484 }
1485
1486 crm_timer_reset(link->watchdog);
1487 task = cam_req_mgr_workq_get_task(link->workq);
1488 if (!task) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001489 CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001490 rc = -EBUSY;
1491 goto end;
1492 }
1493
1494 task_data = (struct crm_task_payload *)task->payload;
1495 task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
1496 notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
1497 notify_err->req_id = err_info->req_id;
1498 notify_err->link_hdl = err_info->link_hdl;
1499 notify_err->dev_hdl = err_info->dev_hdl;
1500 notify_err->error = err_info->error;
1501 task->process_cb = &cam_req_mgr_process_error;
1502 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
1503
1504end:
1505 return rc;
1506}
1507
1508/**
Junzhe Zou2df84502017-05-26 13:20:23 -07001509 * cam_req_mgr_cb_notify_trigger()
Sagar Gored79f95e2017-03-14 18:32:17 -07001510 *
1511 * @brief : SOF received from device, sends trigger through workqueue
1512 * @sof_data: contains information about frame_id, link etc.
1513 *
1514 * @return : 0 on success
1515 *
1516 */
Junzhe Zou2df84502017-05-26 13:20:23 -07001517static int cam_req_mgr_cb_notify_trigger(
1518 struct cam_req_mgr_trigger_notify *trigger_data)
Sagar Gored79f95e2017-03-14 18:32:17 -07001519{
1520 int rc = 0;
1521 struct crm_workq_task *task = NULL;
1522 struct cam_req_mgr_core_link *link = NULL;
Junzhe Zou2df84502017-05-26 13:20:23 -07001523 struct cam_req_mgr_trigger_notify *notify_trigger;
Sagar Gored79f95e2017-03-14 18:32:17 -07001524 struct crm_task_payload *task_data;
1525
Junzhe Zou2df84502017-05-26 13:20:23 -07001526 if (!trigger_data) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001527 CAM_ERR(CAM_CRM, "sof_data is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001528 rc = -EINVAL;
1529 goto end;
1530 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001531
1532 link = (struct cam_req_mgr_core_link *)
Junzhe Zou2df84502017-05-26 13:20:23 -07001533 cam_get_device_priv(trigger_data->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001534 if (!link) {
Junzhe Zou2df84502017-05-26 13:20:23 -07001535 CAM_DBG(CAM_CRM, "link ptr NULL %x", trigger_data->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001536 rc = -EINVAL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001537 goto end;
Sagar Gore8d91a622017-02-23 14:57:18 -08001538 }
1539
Sagar Gored79f95e2017-03-14 18:32:17 -07001540 crm_timer_reset(link->watchdog);
Sagar Gore8d91a622017-02-23 14:57:18 -08001541 task = cam_req_mgr_workq_get_task(link->workq);
1542 if (!task) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001543 CAM_ERR(CAM_CRM, "no empty task frame %lld",
Junzhe Zou2df84502017-05-26 13:20:23 -07001544 trigger_data->frame_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07001545 rc = -EBUSY;
Sagar Gore8d91a622017-02-23 14:57:18 -08001546 goto end;
1547 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001548 task_data = (struct crm_task_payload *)task->payload;
1549 task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
Junzhe Zou2df84502017-05-26 13:20:23 -07001550 notify_trigger = (struct cam_req_mgr_trigger_notify *)&task_data->u;
1551 notify_trigger->frame_id = trigger_data->frame_id;
1552 notify_trigger->link_hdl = trigger_data->link_hdl;
1553 notify_trigger->dev_hdl = trigger_data->dev_hdl;
1554 notify_trigger->trigger = trigger_data->trigger;
1555 task->process_cb = &cam_req_mgr_process_trigger;
Sagar Gored79f95e2017-03-14 18:32:17 -07001556 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
Sagar Gore8d91a622017-02-23 14:57:18 -08001557
1558end:
Sagar Gored79f95e2017-03-14 18:32:17 -07001559 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001560}
1561
Sagar Gored79f95e2017-03-14 18:32:17 -07001562static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
Junzhe Zou2df84502017-05-26 13:20:23 -07001563 .notify_trigger = cam_req_mgr_cb_notify_trigger,
1564 .notify_err = cam_req_mgr_cb_notify_err,
1565 .add_req = cam_req_mgr_cb_add_req,
Sagar Gored79f95e2017-03-14 18:32:17 -07001566};
1567
Sagar Gore8d91a622017-02-23 14:57:18 -08001568/**
Sagar Gored79f95e2017-03-14 18:32:17 -07001569 * __cam_req_mgr_setup_link_info()
Sagar Gore8d91a622017-02-23 14:57:18 -08001570 *
Sagar Gored79f95e2017-03-14 18:32:17 -07001571 * @brief : Sets up input queue, create pd based tables, communicate with
1572 * devs connected on this link and setup communication.
1573 * @link : pointer to link to setup
1574 * @link_info : link_info coming from CSL to prepare link
Sagar Gore8d91a622017-02-23 14:57:18 -08001575 *
Sagar Gored79f95e2017-03-14 18:32:17 -07001576 * @return : 0 on success, negative in case of failure
1577 *
Sagar Gore8d91a622017-02-23 14:57:18 -08001578 */
Sagar Gored79f95e2017-03-14 18:32:17 -07001579static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
1580 struct cam_req_mgr_link_info *link_info)
Sagar Gore8d91a622017-02-23 14:57:18 -08001581{
Sagar Gored79f95e2017-03-14 18:32:17 -07001582 int rc = 0, i = 0;
1583 struct cam_req_mgr_core_dev_link_setup link_data;
1584 struct cam_req_mgr_connected_device *dev;
1585 struct cam_req_mgr_req_tbl *pd_tbl;
1586 enum cam_pipeline_delay max_delay;
Junzhe Zou2df84502017-05-26 13:20:23 -07001587 uint32_t subscribe_event = 0;
Sagar Gore8d91a622017-02-23 14:57:18 -08001588
Sagar Gored79f95e2017-03-14 18:32:17 -07001589 if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
1590 return -EPERM;
Sagar Gore8d91a622017-02-23 14:57:18 -08001591
Sagar Gored79f95e2017-03-14 18:32:17 -07001592 mutex_init(&link->req.lock);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001593 CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001594 link->req.num_tbl = 0;
1595
1596 rc = __cam_req_mgr_setup_in_q(&link->req);
1597 if (rc < 0)
1598 return rc;
1599
1600 mutex_lock(&link->lock);
1601 max_delay = CAM_PIPELINE_DELAY_0;
1602 for (i = 0; i < link_info->num_devices; i++) {
1603 dev = &link->l_dev[i];
1604 /* Using dev hdl, get ops ptr to communicate with device */
1605 dev->ops = (struct cam_req_mgr_kmd_ops *)
1606 cam_get_device_ops(link_info->dev_hdls[i]);
1607 if (!dev->ops ||
1608 !dev->ops->get_dev_info ||
1609 !dev->ops->link_setup) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001610 CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001611 rc = -ENXIO;
1612 goto error;
Sagar Gore8d91a622017-02-23 14:57:18 -08001613 }
Sagar Gored79f95e2017-03-14 18:32:17 -07001614 dev->dev_hdl = link_info->dev_hdls[i];
1615 dev->parent = (void *)link;
1616 dev->dev_info.dev_hdl = dev->dev_hdl;
1617 rc = dev->ops->get_dev_info(&dev->dev_info);
Gregory Bergschneider60679932017-07-19 15:27:16 -06001618
1619 trace_cam_req_mgr_connect_device(link, &dev->dev_info);
1620
Junzhe Zou2df84502017-05-26 13:20:23 -07001621 CAM_DBG(CAM_CRM,
1622 "%x: connected: %s, id %d, delay %d, trigger %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07001623 link_info->session_hdl, dev->dev_info.name,
Junzhe Zou2df84502017-05-26 13:20:23 -07001624 dev->dev_info.dev_id, dev->dev_info.p_delay,
1625 dev->dev_info.trigger);
Sagar Gored79f95e2017-03-14 18:32:17 -07001626 if (rc < 0 ||
1627 dev->dev_info.p_delay >=
1628 CAM_PIPELINE_DELAY_MAX ||
1629 dev->dev_info.p_delay <
1630 CAM_PIPELINE_DELAY_0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001631 CAM_ERR(CAM_CRM, "get device info failed");
Sagar Gored79f95e2017-03-14 18:32:17 -07001632 goto error;
1633 } else {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001634 CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
Sagar Gored79f95e2017-03-14 18:32:17 -07001635 link_info->session_hdl,
1636 dev->dev_info.name,
1637 dev->dev_info.p_delay);
Junzhe Zou2df84502017-05-26 13:20:23 -07001638 if (dev->dev_info.p_delay > max_delay)
1639 max_delay = dev->dev_info.p_delay;
1640
1641 subscribe_event |= (uint32_t)dev->dev_info.trigger;
Sagar Gored79f95e2017-03-14 18:32:17 -07001642 }
Sagar Gore8d91a622017-02-23 14:57:18 -08001643 }
1644
Junzhe Zou2df84502017-05-26 13:20:23 -07001645 link->subscribe_event = subscribe_event;
Sagar Gored79f95e2017-03-14 18:32:17 -07001646 link_data.link_enable = 1;
1647 link_data.link_hdl = link->link_hdl;
1648 link_data.crm_cb = &cam_req_mgr_ops;
1649 link_data.max_delay = max_delay;
Junzhe Zou2df84502017-05-26 13:20:23 -07001650 link_data.subscribe_event = subscribe_event;
Sagar Gored79f95e2017-03-14 18:32:17 -07001651
1652 for (i = 0; i < link_info->num_devices; i++) {
1653 dev = &link->l_dev[i];
1654
1655 link_data.dev_hdl = dev->dev_hdl;
1656 /*
1657 * For unique pipeline delay table create request
1658 * tracking table
1659 */
1660 if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
1661 pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
1662 dev->dev_info.p_delay);
1663 if (!pd_tbl) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001664 CAM_ERR(CAM_CRM, "pd %d tbl not found",
Sagar Gored79f95e2017-03-14 18:32:17 -07001665 dev->dev_info.p_delay);
1666 rc = -ENXIO;
1667 goto error;
1668 }
1669 } else {
1670 pd_tbl = __cam_req_mgr_create_pd_tbl(
1671 dev->dev_info.p_delay);
1672 if (pd_tbl == NULL) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001673 CAM_ERR(CAM_CRM, "create new pd tbl failed");
Sagar Gored79f95e2017-03-14 18:32:17 -07001674 rc = -ENXIO;
1675 goto error;
1676 }
1677 pd_tbl->pd = dev->dev_info.p_delay;
1678 link->pd_mask |= (1 << pd_tbl->pd);
1679 /*
1680 * Add table to list and also sort list
1681 * from max pd to lowest
1682 */
1683 __cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
1684 }
1685 dev->dev_bit = pd_tbl->dev_count++;
1686 dev->pd_tbl = pd_tbl;
1687 pd_tbl->dev_mask |= (1 << dev->dev_bit);
1688
1689 /* Communicate with dev to establish the link */
1690 dev->ops->link_setup(&link_data);
1691
1692 if (link->max_delay < dev->dev_info.p_delay)
1693 link->max_delay = dev->dev_info.p_delay;
1694 }
1695 link->num_devs = link_info->num_devices;
1696
1697 /* Assign id for pd tables */
1698 __cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
1699
1700 /* At start, expect max pd devices, all are in skip state */
1701 __cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
1702
1703 mutex_unlock(&link->lock);
1704 return 0;
1705
1706error:
1707 __cam_req_mgr_destroy_link_info(link);
1708 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001709}
1710
Sagar Gored79f95e2017-03-14 18:32:17 -07001711/* IOCTLs handling section */
Sagar Gore8d91a622017-02-23 14:57:18 -08001712int cam_req_mgr_create_session(
1713 struct cam_req_mgr_session_info *ses_info)
1714{
Sagar Gored79f95e2017-03-14 18:32:17 -07001715 int rc = 0;
1716 int32_t session_hdl;
1717 struct cam_req_mgr_core_session *cam_session = NULL;
Sagar Gore8d91a622017-02-23 14:57:18 -08001718
1719 if (!ses_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001720 CAM_DBG(CAM_CRM, "NULL session info pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08001721 return -EINVAL;
1722 }
1723 mutex_lock(&g_crm_core_dev->crm_lock);
1724 cam_session = (struct cam_req_mgr_core_session *)
1725 kzalloc(sizeof(*cam_session), GFP_KERNEL);
1726 if (!cam_session) {
Sagar Gored79f95e2017-03-14 18:32:17 -07001727 rc = -ENOMEM;
Sagar Gore8d91a622017-02-23 14:57:18 -08001728 goto end;
1729 }
1730
1731 session_hdl = cam_create_session_hdl((void *)cam_session);
1732 if (session_hdl < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001733 CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
1734 session_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001735 rc = session_hdl;
1736 kfree(cam_session);
1737 goto end;
Sagar Gore8d91a622017-02-23 14:57:18 -08001738 }
1739 ses_info->session_hdl = session_hdl;
Sagar Gored79f95e2017-03-14 18:32:17 -07001740
1741 mutex_init(&cam_session->lock);
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001742 CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001743
1744 mutex_lock(&cam_session->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001745 cam_session->session_hdl = session_hdl;
Sagar Gored79f95e2017-03-14 18:32:17 -07001746 cam_session->num_links = 0;
Sagar Gore8d91a622017-02-23 14:57:18 -08001747 list_add(&cam_session->entry, &g_crm_core_dev->session_head);
Sagar Gored79f95e2017-03-14 18:32:17 -07001748 mutex_unlock(&cam_session->lock);
Sagar Gore8d91a622017-02-23 14:57:18 -08001749end:
1750 mutex_unlock(&g_crm_core_dev->crm_lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07001751 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08001752}
1753
/**
 * cam_req_mgr_destroy_session()
 *
 * @brief    : Removes a session from the core device list, frees it and
 *             destroys its session handle. Links are expected to be
 *             unlinked first; active links are only logged, not torn down.
 * @ses_info : identifies the session via session_hdl
 *
 * @return   : 0 on success, -EINVAL on NULL input, -ENOENT if the handle
 *             does not resolve, or the handle-destroy error code.
 */
int cam_req_mgr_destroy_session(
	struct cam_req_mgr_session_info *ses_info)
{
	int rc;
	struct cam_req_mgr_core_session *cam_session = NULL;

	if (!ses_info) {
		CAM_DBG(CAM_CRM, "NULL session info pointer");
		return -EINVAL;
	}

	mutex_lock(&g_crm_core_dev->crm_lock);
	/* Session handle's priv data is the session struct itself */
	cam_session = (struct cam_req_mgr_core_session *)
		cam_get_device_priv(ses_info->session_hdl);
	if (!cam_session) {
		CAM_ERR(CAM_CRM, "failed to get session priv");
		rc = -ENOENT;
		goto end;

	}
	mutex_lock(&cam_session->lock);
	if (cam_session->num_links) {
		/* Session is being destroyed with links still active */
		CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
			ses_info->session_hdl,
			cam_session->num_links);
		/* @TODO : Go through active links and destroy ? */
	}
	/* Unpublish, then destroy the lock only after it is released */
	list_del(&cam_session->entry);
	mutex_unlock(&cam_session->lock);
	mutex_destroy(&cam_session->lock);
	kfree(cam_session);

	rc = cam_destroy_session_hdl(ses_info->session_hdl);
	if (rc < 0)
		CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
			ses_info->session_hdl, rc);

end:
	mutex_unlock(&g_crm_core_dev->crm_lock);
	return rc;
}
1795
/**
 * cam_req_mgr_link()
 *
 * @brief     : Reserves a link in the session, creates its device handle,
 *              queries and links all requested devices, then sets up the
 *              link's workqueue, task payloads and watchdog timer.
 * @link_info : in/out; lists the devices to link, receives link_hdl
 *
 * @return    : 0 on success, negative error code otherwise. On failure
 *              the goto chain unwinds exactly the stages completed so far.
 */
int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
{
	int rc = 0;
	char buf[128];
	struct cam_create_dev_hdl root_dev;
	struct cam_req_mgr_core_session *cam_session;
	struct cam_req_mgr_core_link *link;

	if (!link_info) {
		CAM_DBG(CAM_CRM, "NULL pointer");
		return -EINVAL;
	}
	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
		CAM_ERR(CAM_CRM, "Invalid num devices %d",
			link_info->num_devices);
		return -EINVAL;
	}

	/* session hdl's priv data is cam session struct */
	cam_session = (struct cam_req_mgr_core_session *)
		cam_get_device_priv(link_info->session_hdl);
	if (!cam_session) {
		CAM_DBG(CAM_CRM, "NULL pointer");
		return -EINVAL;
	}

	mutex_lock(&g_crm_core_dev->crm_lock);

	/* Allocate link struct and map it with session's request queue */
	link = __cam_req_mgr_reserve_link(cam_session);
	if (!link) {
		CAM_ERR(CAM_CRM, "failed to reserve new link");
		mutex_unlock(&g_crm_core_dev->crm_lock);
		return -EINVAL;
	}
	CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);

	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
	root_dev.session_hdl = link_info->session_hdl;
	root_dev.priv = (void *)link;

	mutex_lock(&link->lock);
	/* Create unique dev handle for link */
	link->link_hdl = cam_create_device_hdl(&root_dev);
	if (link->link_hdl < 0) {
		CAM_ERR(CAM_CRM,
			"Insufficient memory to create new device handle");
		mutex_unlock(&link->lock);
		rc = link->link_hdl;
		goto link_hdl_fail;
	}
	mutex_unlock(&link->lock);
	link_info->link_hdl = link->link_hdl;

	/* Allocate memory to hold data of all linked devs */
	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
		link_info->num_devices);
	if (rc < 0) {
		CAM_ERR(CAM_CRM,
			"Insufficient memory to create new crm subdevs");
		goto create_subdev_failed;
	}

	/* Using device ops query connected devs, prepare request tables */
	rc = __cam_req_mgr_setup_link_info(link, link_info);
	if (rc < 0)
		goto setup_failed;

	mutex_lock(&link->lock);
	link->state = CAM_CRM_LINK_STATE_READY;
	mutex_unlock(&link->lock);

	/* Create worker for current link; name is "<session>-<link>" */
	snprintf(buf, sizeof(buf), "%x-%x",
		link_info->session_hdl, link->link_hdl);
	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
		&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
	if (rc < 0) {
		CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
		__cam_req_mgr_destroy_link_info(link);
		goto setup_failed;
	}

	/* Assign payload to workqueue tasks */
	rc = __cam_req_mgr_setup_payload(link->workq);
	if (rc < 0) {
		/* Undo link info and workq created above, then unwind */
		__cam_req_mgr_destroy_link_info(link);
		cam_req_mgr_workq_destroy(&link->workq);
		goto setup_failed;
	}

	/* Start watchdog timer to detect if camera hw goes into bad state */
	rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
		link, &__cam_req_mgr_sof_freeze);
	if (rc < 0) {
		/* Free payload array allocated by __cam_req_mgr_setup_payload */
		kfree(link->workq->task.pool[0].payload);
		__cam_req_mgr_destroy_link_info(link);
		cam_req_mgr_workq_destroy(&link->workq);
		goto setup_failed;
	}

	mutex_unlock(&g_crm_core_dev->crm_lock);
	return rc;
setup_failed:
	__cam_req_mgr_destroy_subdev(link->l_dev);
create_subdev_failed:
	cam_destroy_device_hdl(link->link_hdl);
	link_info->link_hdl = 0;
link_hdl_fail:
	/* Return the reserved link to the session's available pool */
	mutex_lock(&link->lock);
	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
	mutex_unlock(&link->lock);

	mutex_unlock(&g_crm_core_dev->crm_lock);
	return rc;
}
1912
/**
 * cam_req_mgr_unlink()
 *
 * @brief       : Tears down a link: frees the workq task payloads, stops
 *                the watchdog timer, destroys the workqueue, request
 *                tables, subdev array and link handle, then returns the
 *                link to the session's free pool. Teardown order matters:
 *                the timer and workq must stop before their data is freed.
 * @unlink_info : identifies session and link handles
 *
 * @return      : 0 on success, -EINVAL on bad input/handles, or the
 *                handle-destroy error code.
 */
int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
{
	int rc = 0;
	struct cam_req_mgr_core_session *cam_session;
	struct cam_req_mgr_core_link *link;

	if (!unlink_info) {
		CAM_ERR(CAM_CRM, "NULL pointer");
		return -EINVAL;
	}

	mutex_lock(&g_crm_core_dev->crm_lock);
	CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);

	/* session hdl's priv data is cam session struct */
	cam_session = (struct cam_req_mgr_core_session *)
		cam_get_device_priv(unlink_info->session_hdl);
	if (!cam_session) {
		CAM_ERR(CAM_CRM, "NULL pointer");
		mutex_unlock(&g_crm_core_dev->crm_lock);
		return -EINVAL;
	}

	/* link hdl's priv data is core_link struct */
	link = cam_get_device_priv(unlink_info->link_hdl);
	if (!link) {
		CAM_ERR(CAM_CRM, "NULL pointer");
		mutex_unlock(&g_crm_core_dev->crm_lock);
		return -EINVAL;
	}
	__cam_req_mgr_print_req_tbl(&link->req);

	/* Destroy workq payload data (single array shared by all tasks) */
	kfree(link->workq->task.pool[0].payload);
	link->workq->task.pool[0].payload = NULL;

	/* Destroy workq and timer of link */
	crm_timer_exit(&link->watchdog);

	cam_req_mgr_workq_destroy(&link->workq);

	/* Clean up request tables */
	__cam_req_mgr_destroy_link_info(link);

	/* Free memory holding data of linked devs */
	__cam_req_mgr_destroy_subdev(link->l_dev);

	/* Destroy the link handle */
	rc = cam_destroy_device_hdl(unlink_info->link_hdl);
	if (rc < 0) {
		CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
			rc, link->link_hdl);
	}

	/* Free current link and put back into session's free pool of links */
	__cam_req_mgr_unreserve_link(cam_session, &link);
	mutex_unlock(&g_crm_core_dev->crm_lock);

	return rc;
}
1973
1974int cam_req_mgr_schedule_request(
1975 struct cam_req_mgr_sched_request *sched_req)
1976{
Sagar Gored79f95e2017-03-14 18:32:17 -07001977 int rc = 0;
Sagar Gored79f95e2017-03-14 18:32:17 -07001978 struct cam_req_mgr_core_link *link = NULL;
1979 struct cam_req_mgr_core_session *session = NULL;
1980 struct cam_req_mgr_sched_request *sched;
Sagar Goreb56c81e2017-05-08 17:15:47 -07001981 struct crm_task_payload task_data;
Sagar Gored79f95e2017-03-14 18:32:17 -07001982
Sagar Gore8d91a622017-02-23 14:57:18 -08001983 if (!sched_req) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001984 CAM_ERR(CAM_CRM, "csl_req is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07001985 rc = -EINVAL;
1986 goto end;
1987 }
1988
1989 link = (struct cam_req_mgr_core_link *)
1990 cam_get_device_priv(sched_req->link_hdl);
1991 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001992 CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07001993 return -EINVAL;
1994 }
1995 session = (struct cam_req_mgr_core_session *)link->parent;
1996 if (!session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001997 CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
Sagar Gore8d91a622017-02-23 14:57:18 -08001998 return -EINVAL;
1999 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002000 CAM_DBG(CAM_CRM, "link %x req %lld",
2001 sched_req->link_hdl, sched_req->req_id);
Sagar Gore8d91a622017-02-23 14:57:18 -08002002
Sagar Goreb56c81e2017-05-08 17:15:47 -07002003 task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
2004 sched = (struct cam_req_mgr_sched_request *)&task_data.u;
Sagar Gored79f95e2017-03-14 18:32:17 -07002005 sched->req_id = sched_req->req_id;
2006 sched->link_hdl = sched_req->link_hdl;
2007 if (session->force_err_recovery == AUTO_RECOVERY) {
2008 sched->bubble_enable = sched_req->bubble_enable;
2009 } else {
2010 sched->bubble_enable =
2011 (session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
2012 }
Sagar Gored79f95e2017-03-14 18:32:17 -07002013
Sagar Goreb56c81e2017-05-08 17:15:47 -07002014 rc = cam_req_mgr_process_sched_req(link, &task_data);
2015
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002016 CAM_DBG(CAM_CRM, "DONE dev %x req %lld",
2017 sched_req->link_hdl, sched_req->req_id);
Sagar Gored79f95e2017-03-14 18:32:17 -07002018end:
2019 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08002020}
2021
Sagar Gored79f95e2017-03-14 18:32:17 -07002022int cam_req_mgr_sync_link(
2023 struct cam_req_mgr_sync_mode *sync_links)
Sagar Gore8d91a622017-02-23 14:57:18 -08002024{
2025 if (!sync_links) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002026 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08002027 return -EINVAL;
2028 }
2029
2030 /* This function handles ioctl, implementation pending */
2031 return 0;
2032}
2033
2034int cam_req_mgr_flush_requests(
Sagar Gored79f95e2017-03-14 18:32:17 -07002035 struct cam_req_mgr_flush_info *flush_info)
Sagar Gore8d91a622017-02-23 14:57:18 -08002036{
Sagar Gored79f95e2017-03-14 18:32:17 -07002037 int rc = 0;
2038 struct crm_workq_task *task = NULL;
2039 struct cam_req_mgr_core_link *link = NULL;
2040 struct cam_req_mgr_flush_info *flush;
2041 struct crm_task_payload *task_data;
2042 struct cam_req_mgr_core_session *session = NULL;
2043
Sagar Gore8d91a622017-02-23 14:57:18 -08002044 if (!flush_info) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002045 CAM_ERR(CAM_CRM, "flush req is NULL");
Sagar Gored79f95e2017-03-14 18:32:17 -07002046 rc = -EFAULT;
2047 goto end;
2048 }
2049 if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002050 CAM_ERR(CAM_CRM, "incorrect flush type %x",
2051 flush_info->flush_type);
Sagar Gored79f95e2017-03-14 18:32:17 -07002052 rc = -EINVAL;
2053 goto end;
Sagar Gore8d91a622017-02-23 14:57:18 -08002054 }
2055
Sagar Gored79f95e2017-03-14 18:32:17 -07002056 /* session hdl's priv data is cam session struct */
2057 session = (struct cam_req_mgr_core_session *)
2058 cam_get_device_priv(flush_info->session_hdl);
2059 if (!session) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002060 CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07002061 rc = -EINVAL;
2062 goto end;
2063 }
2064 if (session->num_links <= 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002065 CAM_WARN(CAM_CRM, "No active links in session %x",
Sagar Gored79f95e2017-03-14 18:32:17 -07002066 flush_info->session_hdl);
2067 goto end;
2068 }
2069
2070 link = (struct cam_req_mgr_core_link *)
2071 cam_get_device_priv(flush_info->link_hdl);
2072 if (!link) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002073 CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
Sagar Gored79f95e2017-03-14 18:32:17 -07002074 rc = -EINVAL;
2075 goto end;
2076 }
2077
2078 task = cam_req_mgr_workq_get_task(link->workq);
2079 if (!task) {
2080 rc = -ENOMEM;
2081 goto end;
2082 }
2083
2084 task_data = (struct crm_task_payload *)task->payload;
2085 task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
2086 flush = (struct cam_req_mgr_flush_info *)&task_data->u;
2087 flush->req_id = flush_info->req_id;
2088 flush->link_hdl = flush_info->link_hdl;
2089 flush->flush_type = flush_info->flush_type;
2090 task->process_cb = &cam_req_mgr_process_flush_req;
2091 rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
2092
2093 /* Blocking call */
2094 init_completion(&link->workq_comp);
2095 rc = wait_for_completion_timeout(
2096 &link->workq_comp,
2097 msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
2098end:
2099 return rc;
Sagar Gore8d91a622017-02-23 14:57:18 -08002100}
2101
2102
2103int cam_req_mgr_core_device_init(void)
2104{
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002105 CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002106
2107 if (g_crm_core_dev) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002108 CAM_WARN(CAM_CRM, "core device is already initialized");
Sagar Gore8d91a622017-02-23 14:57:18 -08002109 return 0;
2110 }
2111 g_crm_core_dev = (struct cam_req_mgr_core_device *)
2112 kzalloc(sizeof(*g_crm_core_dev), GFP_KERNEL);
2113 if (!g_crm_core_dev)
2114 return -ENOMEM;
2115
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002116 CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002117 INIT_LIST_HEAD(&g_crm_core_dev->session_head);
2118 mutex_init(&g_crm_core_dev->crm_lock);
Sagar Gored79f95e2017-03-14 18:32:17 -07002119 cam_req_mgr_debug_register(g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002120
2121 return 0;
2122}
2123
2124int cam_req_mgr_core_device_deinit(void)
2125{
2126 if (!g_crm_core_dev) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002127 CAM_ERR(CAM_CRM, "NULL pointer");
Sagar Gore8d91a622017-02-23 14:57:18 -08002128 return -EINVAL;
2129 }
2130
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07002131 CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
Sagar Gore8d91a622017-02-23 14:57:18 -08002132 mutex_destroy(&g_crm_core_dev->crm_lock);
2133 kfree(g_crm_core_dev);
2134 g_crm_core_dev = NULL;
2135
2136 return 0;
2137}