blob: 2dc9a1f52f48a17153f47f8a95ece5a48b5bd5a1 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
Sachin Bhayareeeb88892018-01-02 16:36:01 +053020#include <linux/uaccess.h>
21#include <linux/of.h>
22#include <linux/clk.h>
23#include <linux/msm-bus.h>
24#include <linux/msm-bus-board.h>
25#include <linux/regulator/consumer.h>
26
27#include "mdss_rotator_internal.h"
28#include "mdss_mdp.h"
29#include "mdss_debug.h"
Sachin Bhayare2b6d0042018-01-13 19:38:21 +053030#include "mdss_sync.h"
Sachin Bhayareeeb88892018-01-02 16:36:01 +053031
32/* waiting for hw time out, 3 vsync for 30fps*/
33#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
34
35/* acquire fence time out, following other driver fence time out practice */
36#define ROT_FENCE_WAIT_TIMEOUT MSEC_PER_SEC
37/*
38 * Max rotator hw blocks possible. Used for upper array limits instead of
39 * alloc and freeing small array
40 */
41#define ROT_MAX_HW_BLOCKS 2
42
43#define ROT_CHECK_BOUNDS(offset, size, max_size) \
44 (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
45
46#define CLASS_NAME "rotator"
47#define DRIVER_NAME "mdss_rotator"
48
49#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
50 { \
51 .src = MSM_BUS_MASTER_AMPSS_M0, \
52 .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
53 .ab = (ab_val), \
54 .ib = (ib_val), \
55 }
56
57#define BUS_VOTE_19_MHZ 153600000
58
59static struct msm_bus_vectors rot_reg_bus_vectors[] = {
60 MDP_REG_BUS_VECTOR_ENTRY(0, 0),
61 MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
62};
63static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
64 rot_reg_bus_vectors)];
65static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
66 .usecase = rot_reg_bus_usecases,
67 .num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
68 .name = "mdss_rot_reg",
69 .active_only = 1,
70};
71
72static struct mdss_rot_mgr *rot_mgr;
73static void mdss_rotator_wq_handler(struct work_struct *work);
74
/*
 * mdss_rotator_bus_scale_set_quota() - vote rotator data-bus bandwidth
 * @bus:   rotator bus client state (handle, scale table, cached vote)
 * @quota: requested bandwidth; 0 drops the vote
 *
 * Splits @quota evenly across all AXI ports of the selected use case and
 * issues the vote through msm_bus. A repeated request for the currently
 * voted quota is a no-op. Returns 0 on success or a negative errno.
 */
static int mdss_rotator_bus_scale_set_quota(struct mdss_rot_bus_data_type *bus,
	u64 quota)
{
	int new_uc_idx;
	int ret;

	/* bus handle below 1 means the client was never registered */
	if (bus->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", bus->bus_hdl);
		return -EINVAL;
	}

	if (bus->curr_quota_val == quota) {
		pr_debug("bw request already requested\n");
		return 0;
	}

	if (!quota) {
		/* use case 0 is the zero-bandwidth vote */
		new_uc_idx = 0;
	} else {
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			bus->bus_scale_pdata;
		u64 port_quota = quota;
		u32 total_axi_port_cnt;
		int i;

		/*
		 * Alternate between the non-zero use cases so the bus
		 * driver always sees an index change and re-reads the
		 * updated vectors.
		 */
		new_uc_idx = (bus->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
		if (total_axi_port_cnt == 0) {
			pr_err("Number of bw paths is 0\n");
			return -ENODEV;
		}
		/* distribute the quota evenly over all AXI ports */
		do_div(port_quota, total_axi_port_cnt);

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = port_quota;
			vect->ib = 0;
		}
	}
	bus->curr_bw_uc_idx = new_uc_idx;
	bus->curr_quota_val = quota;

	pr_debug("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
	MDSS_XLOG(new_uc_idx, ((quota >> 32) & 0xFFFFFFFF),
		(quota & 0xFFFFFFFF));
	ATRACE_BEGIN("msm_bus_scale_req_rot");
	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
		new_uc_idx);
	ATRACE_END("msm_bus_scale_req_rot");
	return ret;
}
129
130static int mdss_rotator_enable_reg_bus(struct mdss_rot_mgr *mgr, u64 quota)
131{
132 int ret = 0, changed = 0;
133 u32 usecase_ndx = 0;
134
135 if (!mgr || !mgr->reg_bus.bus_hdl)
136 return 0;
137
138 if (quota)
139 usecase_ndx = 1;
140
141 if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) {
142 mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
143 changed++;
144 }
145
146 pr_debug("%s, changed=%d register bus %s\n", __func__, changed,
147 quota ? "Enable":"Disable");
148
149 if (changed) {
150 ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
151 ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
152 usecase_ndx);
153 ATRACE_END("msm_bus_scale_req_rot_reg");
154 }
155
156 return ret;
157}
158
159/*
160 * Clock rate of all open sessions working a particular hw block
161 * are added together to get the required rate for that hw block.
162 * The max of each hw block becomes the final clock rate voted for
163 */
static unsigned long mdss_rotator_clk_rate_calc(
	struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private)
{
	struct mdss_rot_perf *perf;
	/* per-hw-block accumulated rate; index == wb/queue index */
	unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0};
	unsigned long total_clk_rate = 0;
	int i, wb_idx;

	mutex_lock(&private->perf_lock);
	list_for_each_entry(perf, &private->perf_list, list) {
		bool rate_accounted_for = false;

		mutex_lock(&perf->work_dis_lock);
		/*
		 * If there is one session that has two work items across
		 * different hw blocks rate is accounted for in both blocks.
		 */
		for (i = 0; i < mgr->queue_count; i++) {
			if (perf->work_distribution[i]) {
				clk_rate[i] += perf->clk_rate;
				rate_accounted_for = true;
			}
		}

		/*
		 * Sessions that are open but not distributed on any hw block
		 * Still need to be accounted for. Rate is added to last known
		 * wb idx.
		 */
		wb_idx = perf->last_wb_idx;
		if ((!rate_accounted_for) && (wb_idx >= 0) &&
			(wb_idx < mgr->queue_count))
			clk_rate[wb_idx] += perf->clk_rate;
		mutex_unlock(&perf->work_dis_lock);
	}
	mutex_unlock(&private->perf_lock);

	/* final vote is the max requirement across hw blocks */
	for (i = 0; i < mgr->queue_count; i++)
		total_clk_rate = max(clk_rate[i], total_clk_rate);

	pr_debug("Total clk rate calc=%lu\n", total_clk_rate);
	return total_clk_rate;
}
208
209static struct clk *mdss_rotator_get_clk(struct mdss_rot_mgr *mgr, u32 clk_idx)
210{
211 if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
212 pr_err("Invalid clk index:%u", clk_idx);
213 return NULL;
214 }
215
216 return mgr->rot_clk[clk_idx];
217}
218
/*
 * mdss_rotator_set_clk_rate() - round and program a rotator clock rate
 * @mgr:     rotator manager
 * @rate:    requested rate
 * @clk_idx: index into mgr->rot_clk[] (e.g. MDSS_CLK_ROTATOR_CORE)
 *
 * The rate is first rounded by the clock framework; the clock is only
 * reprogrammed when the rounded rate differs from the current rate.
 */
static void mdss_rotator_set_clk_rate(struct mdss_rot_mgr *mgr,
	unsigned long rate, u32 clk_idx)
{
	unsigned long clk_rate;
	struct clk *clk = mdss_rotator_get_clk(mgr, clk_idx);
	int ret;

	if (clk) {
		mutex_lock(&mgr->clk_lock);
		clk_rate = clk_round_rate(clk, rate);
		if (IS_ERR_VALUE(clk_rate)) {
			pr_err("unable to round rate err=%ld\n", clk_rate);
		} else if (clk_rate != clk_get_rate(clk)) {
			ret = clk_set_rate(clk, clk_rate);
			if (IS_ERR_VALUE((unsigned long)ret)) {
				pr_err("clk_set_rate failed, err:%d\n", ret);
			} else {
				pr_debug("rotator clk rate=%lu\n", clk_rate);
				MDSS_XLOG(clk_rate);
			}
		}
		mutex_unlock(&mgr->clk_lock);
	} else {
		pr_err("rotator clk not setup properly\n");
	}
}
245
246static void mdss_rotator_footswitch_ctrl(struct mdss_rot_mgr *mgr, bool on)
247{
248 int ret;
249
250 if (mgr->regulator_enable == on) {
251 pr_err("Regulators already in selected mode on=%d\n", on);
252 return;
253 }
254
255 pr_debug("%s: rotator regulators", on ? "Enable" : "Disable");
Sachin Bhayare5076e252018-01-18 14:56:45 +0530256 ret = msm_mdss_enable_vreg(mgr->module_power.vreg_config,
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530257 mgr->module_power.num_vreg, on);
258 if (ret) {
259 pr_warn("Rotator regulator failed to %s\n",
260 on ? "enable" : "disable");
261 return;
262 }
263
264 mgr->regulator_enable = on;
265}
266
/*
 * mdss_rotator_clk_ctrl() - reference-counted enable/disable of all
 * rotator clocks
 * @mgr:    rotator manager
 * @enable: non-zero takes a reference, zero drops one
 *
 * Clocks are only touched on the 0->1 and 1->0 transitions of the count.
 * On those transitions the data bus client context is also switched:
 * Active+Sleep while clocks are on, Active Only while they are off.
 * Returns 0 on success or the clk_prepare_enable() error.
 *
 * NOTE(review): on enable failure rot_enable_clk_cnt stays incremented —
 * confirm callers treat a failed enable as fatal.
 */
static int mdss_rotator_clk_ctrl(struct mdss_rot_mgr *mgr, int enable)
{
	struct clk *clk;
	int ret = 0;
	int i, changed = 0;

	mutex_lock(&mgr->clk_lock);
	if (enable) {
		if (mgr->rot_enable_clk_cnt == 0)
			changed++;
		mgr->rot_enable_clk_cnt++;
	} else {
		if (mgr->rot_enable_clk_cnt) {
			mgr->rot_enable_clk_cnt--;
			if (mgr->rot_enable_clk_cnt == 0)
				changed++;
		} else {
			/* unbalanced disable; refuse to underflow */
			pr_err("Can not be turned off\n");
		}
	}

	if (changed) {
		pr_debug("Rotator clk %s\n", enable ? "enable" : "disable");
		for (i = 0; i < MDSS_CLK_ROTATOR_END_IDX; i++) {
			clk = mgr->rot_clk[i];
			if (enable) {
				ret = clk_prepare_enable(clk);
				if (ret) {
					pr_err("enable failed clk_idx %d\n", i);
					goto error;
				}
			} else {
				clk_disable_unprepare(clk);
			}
		}
		mutex_lock(&mgr->bus_lock);
		if (enable) {
			/* Active+Sleep */
			msm_bus_scale_client_update_context(
				mgr->data_bus.bus_hdl, false,
				mgr->data_bus.curr_bw_uc_idx);
			trace_rotator_bw_ao_as_context(0);
		} else {
			/* Active Only */
			msm_bus_scale_client_update_context(
				mgr->data_bus.bus_hdl, true,
				mgr->data_bus.curr_bw_uc_idx);
			trace_rotator_bw_ao_as_context(1);
		}
		mutex_unlock(&mgr->bus_lock);
	}
	mutex_unlock(&mgr->clk_lock);

	return ret;
error:
	/* unwind only the clocks enabled by this call */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(mgr->rot_clk[i]);
	mutex_unlock(&mgr->clk_lock);
	return ret;
}
327
328int mdss_rotator_resource_ctrl(struct mdss_rot_mgr *mgr, int enable)
329{
330 int changed = 0;
331 int ret = 0;
332
333 mutex_lock(&mgr->clk_lock);
334 if (enable) {
335 if (mgr->res_ref_cnt == 0)
336 changed++;
337 mgr->res_ref_cnt++;
338 } else {
339 if (mgr->res_ref_cnt) {
340 mgr->res_ref_cnt--;
341 if (mgr->res_ref_cnt == 0)
342 changed++;
343 } else {
344 pr_err("Rot resource already off\n");
345 }
346 }
347
348 pr_debug("%s: res_cnt=%d changed=%d enable=%d\n",
349 __func__, mgr->res_ref_cnt, changed, enable);
350 MDSS_XLOG(mgr->res_ref_cnt, changed, enable);
351
352 if (changed) {
353 if (enable)
354 mdss_rotator_footswitch_ctrl(mgr, true);
355 else
356 mdss_rotator_footswitch_ctrl(mgr, false);
357 }
358 mutex_unlock(&mgr->clk_lock);
359 return ret;
360}
361
362/* caller is expected to hold perf->work_dis_lock lock */
363static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
364 struct mdss_rot_perf *perf)
365{
366 int i;
367
368 for (i = 0; i < mgr->queue_count; i++) {
369 if (perf->work_distribution[i]) {
370 pr_debug("Work is still scheduled to complete\n");
371 return true;
372 }
373 }
374 return false;
375}
376
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530377static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
378{
379 int ret = 0, fd;
380 u32 val;
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530381 struct mdss_fence *fence;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530382 struct mdss_rot_timeline *rot_timeline;
383
384 if (!entry->queue)
385 return -EINVAL;
386
387 rot_timeline = &entry->queue->timeline;
388
389 mutex_lock(&rot_timeline->lock);
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530390 val = 1;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530391
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530392 fence = mdss_get_sync_fence(rot_timeline->timeline,
393 rot_timeline->fence_name, NULL, val);
394 if (fence == NULL) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530395 pr_err("cannot create sync point\n");
396 goto sync_pt_create_err;
397 }
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530398 fd = mdss_get_sync_fence_fd(fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530399 if (fd < 0) {
400 pr_err("get_unused_fd_flags failed error:0x%x\n", fd);
401 ret = fd;
402 goto get_fd_err;
403 }
404
405 rot_timeline->next_value++;
406 mutex_unlock(&rot_timeline->lock);
407
408 entry->output_fence_fd = fd;
409 entry->output_fence = fence;
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530410 pr_debug("output sync point created at %s:val=%u\n",
411 mdss_get_sync_fence_name(fence), val);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530412
413 return 0;
414
415get_fd_err:
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530416 mdss_put_sync_fence(fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530417sync_pt_create_err:
418 mutex_unlock(&rot_timeline->lock);
419 return ret;
420}
421
422static void mdss_rotator_clear_fence(struct mdss_rot_entry *entry)
423{
424 struct mdss_rot_timeline *rot_timeline;
425
426 if (entry->input_fence) {
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530427 mdss_put_sync_fence(entry->input_fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530428 entry->input_fence = NULL;
429 }
430
431 rot_timeline = &entry->queue->timeline;
432
433 /* fence failed to copy to user space */
434 if (entry->output_fence) {
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530435 mdss_put_sync_fence(entry->output_fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530436 entry->output_fence = NULL;
437 put_unused_fd(entry->output_fence_fd);
438
439 mutex_lock(&rot_timeline->lock);
440 rot_timeline->next_value--;
441 mutex_unlock(&rot_timeline->lock);
442 }
443}
444
445static int mdss_rotator_signal_output(struct mdss_rot_entry *entry)
446{
447 struct mdss_rot_timeline *rot_timeline;
448
449 if (!entry->queue)
450 return -EINVAL;
451
452 rot_timeline = &entry->queue->timeline;
453
454 if (entry->output_signaled) {
455 pr_debug("output already signaled\n");
456 return 0;
457 }
458
459 mutex_lock(&rot_timeline->lock);
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530460 mdss_inc_timeline(rot_timeline->timeline, 1);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530461 mutex_unlock(&rot_timeline->lock);
462
463 entry->output_signaled = true;
464
465 return 0;
466}
467
468static int mdss_rotator_wait_for_input(struct mdss_rot_entry *entry)
469{
470 int ret;
471
472 if (!entry->input_fence) {
473 pr_debug("invalid input fence, no wait\n");
474 return 0;
475 }
476
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530477 ret = mdss_wait_sync_fence(entry->input_fence, ROT_FENCE_WAIT_TIMEOUT);
478 mdss_put_sync_fence(entry->input_fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530479 entry->input_fence = NULL;
480 return ret;
481}
482
/*
 * mdss_rotator_import_buffer() - import the planes of a user layer buffer
 * @buffer: userspace buffer description (fd + offset per plane)
 * @data:   mdp buffer data to populate
 * @flags:  session flags (e.g. MDP_SECURE_OVERLAY_SESSION)
 * @dev:    device used for the mapping
 * @input:  true for source (DMA_TO_DEVICE), false for destination
 *          (DMA_FROM_DEVICE)
 *
 * Returns 0 on success or a negative errno from the mdp import.
 * NOTE(review): state/last_alloc are updated even when the import
 * fails — confirm callers always release the data on error.
 */
static int mdss_rotator_import_buffer(struct mdp_layer_buffer *buffer,
	struct mdss_mdp_data *data, u32 flags, struct device *dev, bool input)
{
	int i, ret = 0;
	struct msmfb_data planes[MAX_PLANES];
	int dir = DMA_TO_DEVICE;

	/* destination buffers are written by the hw */
	if (!input)
		dir = DMA_FROM_DEVICE;

	memset(planes, 0, sizeof(planes));

	if (buffer->plane_count > MAX_PLANES) {
		pr_err("buffer plane_count exceeds MAX_PLANES limit:%d\n",
			buffer->plane_count);
		return -EINVAL;
	}

	for (i = 0; i < buffer->plane_count; i++) {
		planes[i].memory_id = buffer->planes[i].fd;
		planes[i].offset = buffer->planes[i].offset;
	}

	ret = mdss_mdp_data_get_and_validate_size(data, planes,
		buffer->plane_count, flags, dev, true, dir, buffer);
	data->state = MDP_BUF_STATE_READY;
	data->last_alloc = local_clock();

	return ret;
}
513
/*
 * mdss_rotator_map_and_check_data() - iommu-map the src/dst buffers of
 * an entry and validate their sizes against the configured formats
 *
 * Takes an iommu reference for the duration of the mapping/checks and
 * always drops it before returning. Returns 0 on success or a negative
 * errno from the first failing step.
 */
static int mdss_rotator_map_and_check_data(struct mdss_rot_entry *entry)
{
	int ret;
	struct mdp_layer_buffer *input;
	struct mdp_layer_buffer *output;
	struct mdss_mdp_format_params *fmt;
	struct mdss_mdp_plane_sizes ps;
	bool rotation;

	input = &entry->item.input;
	output = &entry->item.output;

	rotation = (entry->item.flags & MDP_ROTATION_90) ? true : false;

	ATRACE_BEGIN(__func__);
	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		ATRACE_END(__func__);
		return ret;
	}

	/* if error during map, the caller will release the data */
	entry->src_buf.state = MDP_BUF_STATE_ACTIVE;
	ret = mdss_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE);
	if (ret) {
		pr_err("source buffer mapping failed ret:%d\n", ret);
		goto end;
	}

	entry->dst_buf.state = MDP_BUF_STATE_ACTIVE;
	ret = mdss_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE);
	if (ret) {
		pr_err("destination buffer mapping failed ret:%d\n", ret);
		goto end;
	}

	/* validate the source buffer against its format's plane sizes */
	fmt = mdss_mdp_get_format_params(input->format);
	if (!fmt) {
		pr_err("invalid input format:%d\n", input->format);
		ret = -EINVAL;
		goto end;
	}

	ret = mdss_mdp_get_plane_sizes(
		fmt, input->width, input->height, &ps, 0, rotation);
	if (ret) {
		pr_err("fail to get input plane size ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_data_check(&entry->src_buf, &ps, fmt);
	if (ret) {
		pr_err("fail to check input data ret=%d\n", ret);
		goto end;
	}

	/* validate the destination buffer the same way */
	fmt = mdss_mdp_get_format_params(output->format);
	if (!fmt) {
		pr_err("invalid output format:%d\n", output->format);
		ret = -EINVAL;
		goto end;
	}

	ret = mdss_mdp_get_plane_sizes(
		fmt, output->width, output->height, &ps, 0, rotation);
	if (ret) {
		pr_err("fail to get output plane size ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_data_check(&entry->dst_buf, &ps, fmt);
	if (ret) {
		pr_err("fail to check output data ret=%d\n", ret);
		goto end;
	}

end:
	mdss_iommu_ctrl(0);
	ATRACE_END(__func__);

	return ret;
}
596
597static struct mdss_rot_perf *__mdss_rotator_find_session(
598 struct mdss_rot_file_private *private,
599 u32 session_id)
600{
601 struct mdss_rot_perf *perf, *perf_next;
602 bool found = false;
603
604 list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
605 if (perf->config.session_id == session_id) {
606 found = true;
607 break;
608 }
609 }
610 if (!found)
611 perf = NULL;
612 return perf;
613}
614
615static struct mdss_rot_perf *mdss_rotator_find_session(
616 struct mdss_rot_file_private *private,
617 u32 session_id)
618{
619 struct mdss_rot_perf *perf;
620
621 mutex_lock(&private->perf_lock);
622 perf = __mdss_rotator_find_session(private, session_id);
623 mutex_unlock(&private->perf_lock);
624 return perf;
625}
626
627static void mdss_rotator_release_data(struct mdss_rot_entry *entry)
628{
629 struct mdss_mdp_data *src_buf = &entry->src_buf;
630 struct mdss_mdp_data *dst_buf = &entry->dst_buf;
631
632 mdss_mdp_data_free(src_buf, true, DMA_TO_DEVICE);
633 src_buf->last_freed = local_clock();
634 src_buf->state = MDP_BUF_STATE_UNUSED;
635
636 mdss_mdp_data_free(dst_buf, true, DMA_FROM_DEVICE);
637 dst_buf->last_freed = local_clock();
638 dst_buf->state = MDP_BUF_STATE_UNUSED;
639}
640
641static int mdss_rotator_import_data(struct mdss_rot_mgr *mgr,
642 struct mdss_rot_entry *entry)
643{
644 int ret;
645 struct mdp_layer_buffer *input;
646 struct mdp_layer_buffer *output;
647 u32 flag = 0;
648
649 input = &entry->item.input;
650 output = &entry->item.output;
651
652 if (entry->item.flags & MDP_ROTATION_SECURE)
653 flag = MDP_SECURE_OVERLAY_SESSION;
654
655 ret = mdss_rotator_import_buffer(input, &entry->src_buf, flag,
656 &mgr->pdev->dev, true);
657 if (ret) {
658 pr_err("fail to import input buffer\n");
659 return ret;
660 }
661
662 /*
663 * driver assumes output buffer is ready to be written
664 * immediately
665 */
666 ret = mdss_rotator_import_buffer(output, &entry->dst_buf, flag,
667 &mgr->pdev->dev, false);
668 if (ret) {
669 pr_err("fail to import output buffer\n");
670 return ret;
671 }
672
673 return ret;
674}
675
676static struct mdss_rot_hw_resource *mdss_rotator_hw_alloc(
677 struct mdss_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
678{
679 struct mdss_rot_hw_resource *hw;
680 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
681 u32 pipe_ndx, offset = mdss_mdp_get_wb_ctl_support(mdata, true);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +0530682 int ret = 0;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530683
684 hw = devm_kzalloc(&mgr->pdev->dev, sizeof(struct mdss_rot_hw_resource),
685 GFP_KERNEL);
686 if (!hw)
687 return ERR_PTR(-ENOMEM);
688
689 hw->ctl = mdss_mdp_ctl_alloc(mdata, offset);
690 if (IS_ERR_OR_NULL(hw->ctl)) {
691 pr_err("unable to allocate ctl\n");
692 ret = -ENODEV;
693 goto error;
694 }
695
696 if (wb_id == MDSS_ROTATION_HW_ANY)
697 hw->wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, hw->ctl->num);
698 else
699 hw->wb = mdss_mdp_wb_assign(wb_id, hw->ctl->num);
700
701 if (IS_ERR_OR_NULL(hw->wb)) {
702 pr_err("unable to allocate wb\n");
703 ret = -ENODEV;
704 goto error;
705 }
706 hw->ctl->wb = hw->wb;
707 hw->mixer = mdss_mdp_mixer_assign(hw->wb->num, true, true);
708
709 if (IS_ERR_OR_NULL(hw->mixer)) {
710 pr_err("unable to allocate wb mixer\n");
711 ret = -ENODEV;
712 goto error;
713 }
714 hw->ctl->mixer_left = hw->mixer;
715 hw->mixer->ctl = hw->ctl;
716
717 hw->mixer->rotator_mode = true;
718
719 switch (hw->mixer->num) {
720 case MDSS_MDP_WB_LAYERMIXER0:
721 hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
722 break;
723 case MDSS_MDP_WB_LAYERMIXER1:
724 hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE;
725 break;
726 default:
727 pr_err("invalid layer mixer=%d\n", hw->mixer->num);
728 ret = -EINVAL;
729 goto error;
730 }
731
732 hw->ctl->ops.start_fnc = mdss_mdp_writeback_start;
733 hw->ctl->power_state = MDSS_PANEL_POWER_ON;
734 hw->ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
735
736
737 if (hw->ctl->ops.start_fnc)
738 ret = hw->ctl->ops.start_fnc(hw->ctl);
739
740 if (ret)
741 goto error;
742
743 if (pipe_id >= mdata->ndma_pipes)
744 goto error;
745
746 pipe_ndx = mdata->dma_pipes[pipe_id].ndx;
747 hw->pipe = mdss_mdp_pipe_assign(mdata, hw->mixer,
748 pipe_ndx, MDSS_MDP_PIPE_RECT0);
749 if (IS_ERR_OR_NULL(hw->pipe)) {
750 pr_err("dma pipe allocation failed\n");
751 ret = -ENODEV;
752 goto error;
753 }
754
755 hw->pipe->mixer_left = hw->mixer;
756 hw->pipe_id = hw->wb->num;
757 hw->wb_id = hw->wb->num;
758
759 return hw;
760error:
761 if (!IS_ERR_OR_NULL(hw->pipe))
762 mdss_mdp_pipe_destroy(hw->pipe);
763 if (!IS_ERR_OR_NULL(hw->ctl)) {
764 if (hw->ctl->ops.stop_fnc)
765 hw->ctl->ops.stop_fnc(hw->ctl, MDSS_PANEL_POWER_OFF);
766 mdss_mdp_ctl_free(hw->ctl);
767 }
768 devm_kfree(&mgr->pdev->dev, hw);
769
770 return ERR_PTR(ret);
771}
772
/*
 * mdss_rotator_free_hw() - tear down an allocated rotator hw resource
 *
 * Destroys the dma pipe, stops and frees the block-mode wb ctl (after
 * switching the mixer's ctl back to block type), then frees the
 * resource struct itself.
 */
static void mdss_rotator_free_hw(struct mdss_rot_mgr *mgr,
	struct mdss_rot_hw_resource *hw)
{
	struct mdss_mdp_mixer *mixer;
	struct mdss_mdp_ctl *ctl;

	mixer = hw->pipe->mixer_left;

	mdss_mdp_pipe_destroy(hw->pipe);

	/* recover the block-mode ctl before stopping/freeing it */
	ctl = mdss_mdp_ctl_mixer_switch(mixer->ctl,
		MDSS_MDP_WB_CTL_TYPE_BLOCK);
	if (ctl) {
		if (ctl->ops.stop_fnc)
			ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);
		mdss_mdp_ctl_free(ctl);
	}

	devm_kfree(&mgr->pdev->dev, hw);
}
793
794struct mdss_rot_hw_resource *mdss_rotator_get_hw_resource(
795 struct mdss_rot_queue *queue, struct mdss_rot_entry *entry)
796{
797 struct mdss_rot_hw_resource *hw = queue->hw;
798
799 if (!hw) {
800 pr_err("no hw in the queue\n");
801 return NULL;
802 }
803
804 mutex_lock(&queue->hw_lock);
805
806 if (hw->workload) {
807 hw = ERR_PTR(-EBUSY);
808 goto get_hw_resource_err;
809 }
810 hw->workload = entry;
811
812get_hw_resource_err:
813 mutex_unlock(&queue->hw_lock);
814 return hw;
815}
816
/* Release the queue's hw block by clearing its active workload. */
static void mdss_rotator_put_hw_resource(struct mdss_rot_queue *queue,
	struct mdss_rot_hw_resource *hw)
{
	mutex_lock(&queue->hw_lock);
	hw->workload = NULL;
	mutex_unlock(&queue->hw_lock);
}
824
825/*
826 * caller will need to call mdss_rotator_deinit_queue when
827 * the function returns error
828 */
static int mdss_rotator_init_queue(struct mdss_rot_mgr *mgr)
{
	int i, size, ret = 0;
	char name[32];

	/* one queue (workqueue + timeline + hw slot) per wb block */
	size = sizeof(struct mdss_rot_queue) * mgr->queue_count;
	mgr->queues = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
	if (!mgr->queues)
		return -ENOMEM;

	for (i = 0; i < mgr->queue_count; i++) {
		snprintf(name, sizeof(name), "rot_workq_%d", i);
		pr_debug("work queue name=%s\n", name);
		/* ordered workqueue: commits on one hw block serialize */
		mgr->queues[i].rot_work_queue = alloc_ordered_workqueue("%s",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
		if (!mgr->queues[i].rot_work_queue) {
			ret = -EPERM;
			break;
		}

		snprintf(name, sizeof(name), "rot_timeline_%d", i);
		pr_debug("timeline name=%s\n", name);
		mgr->queues[i].timeline.timeline =
			mdss_create_timeline(name);
		if (!mgr->queues[i].timeline.timeline) {
			ret = -EPERM;
			break;
		}

		size = sizeof(mgr->queues[i].timeline.fence_name);
		snprintf(mgr->queues[i].timeline.fence_name, size,
			"rot_fence_%d", i);
		mutex_init(&mgr->queues[i].timeline.lock);

		mutex_init(&mgr->queues[i].hw_lock);
	}

	return ret;
}
868
869static void mdss_rotator_deinit_queue(struct mdss_rot_mgr *mgr)
870{
871 int i;
872
873 if (!mgr->queues)
874 return;
875
876 for (i = 0; i < mgr->queue_count; i++) {
877 if (mgr->queues[i].rot_work_queue)
878 destroy_workqueue(mgr->queues[i].rot_work_queue);
879
880 if (mgr->queues[i].timeline.timeline) {
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530881 struct mdss_timeline *obj;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530882
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530883 obj = (struct mdss_timeline *)
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530884 mgr->queues[i].timeline.timeline;
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530885 mdss_destroy_timeline(obj);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530886 }
887 }
888 devm_kfree(&mgr->pdev->dev, mgr->queues);
889 mgr->queue_count = 0;
890}
891
892/*
893 * mdss_rotator_assign_queue() - Function assign rotation work onto hw
894 * @mgr: Rotator manager.
895 * @entry: Contains details on rotator work item being requested
896 * @private: Private struct used for access rot session performance struct
897 *
898 * This Function allocates hw required to complete rotation work item
899 * requested.
900 *
901 * Caller is responsible for calling cleanup function if error is returned
902 */
static int mdss_rotator_assign_queue(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry,
	struct mdss_rot_file_private *private)
{
	struct mdss_rot_perf *perf;
	struct mdss_rot_queue *queue;
	struct mdss_rot_hw_resource *hw;
	struct mdp_rotation_item *item = &entry->item;
	u32 wb_idx = item->wb_idx;
	u32 pipe_idx = item->pipe_idx;
	int ret = 0;

	/*
	 * todo: instead of always assign writeback block 0, we can
	 * apply some load balancing logic in the future
	 */
	if (wb_idx == MDSS_ROTATION_HW_ANY) {
		wb_idx = 0;
		pipe_idx = 0;
	}

	if (wb_idx >= mgr->queue_count) {
		pr_err("Invalid wb idx = %d\n", wb_idx);
		return -EINVAL;
	}

	queue = mgr->queues + wb_idx;

	mutex_lock(&queue->hw_lock);

	/* lazily allocate the hw block on first use of this queue */
	if (!queue->hw) {
		hw = mdss_rotator_hw_alloc(mgr, pipe_idx, wb_idx);
		if (IS_ERR_OR_NULL(hw)) {
			pr_err("fail to allocate hw\n");
			/*
			 * NOTE(review): PTR_ERR(NULL) is 0 — a NULL hw
			 * would report success; confirm hw_alloc never
			 * returns plain NULL.
			 */
			ret = PTR_ERR(hw);
		} else {
			queue->hw = hw;
		}
	}

	/* count this entry against the hw block's pending work */
	if (queue->hw) {
		entry->queue = queue;
		queue->hw->pending_count++;
	}

	mutex_unlock(&queue->hw_lock);

	perf = mdss_rotator_find_session(private, item->session_id);
	if (!perf) {
		pr_err("Could not find session based on rotation work item\n");
		return -EINVAL;
	}

	entry->perf = perf;
	/* remember where this session last ran for clk rate accounting */
	perf->last_wb_idx = wb_idx;

	return ret;
}
961
962static void mdss_rotator_unassign_queue(struct mdss_rot_mgr *mgr,
963 struct mdss_rot_entry *entry)
964{
965 struct mdss_rot_queue *queue = entry->queue;
966
967 if (!queue)
968 return;
969
970 entry->queue = NULL;
971
972 mutex_lock(&queue->hw_lock);
973
974 if (!queue->hw) {
975 pr_err("entry assigned a queue with no hw\n");
976 mutex_unlock(&queue->hw_lock);
977 return;
978 }
979
980 queue->hw->pending_count--;
981 if (queue->hw->pending_count == 0) {
982 mdss_rotator_free_hw(mgr, queue->hw);
983 queue->hw = NULL;
984 }
985
986 mutex_unlock(&queue->hw_lock);
987}
988
/*
 * mdss_rotator_queue_request() - account and dispatch all entries of a
 * request to their assigned hw queues
 *
 * First charges every entry to its wb block's work distribution (input
 * to the clk rate calculation), then votes the resulting core clock
 * rate, and finally queues each entry's commit work.
 */
static void mdss_rotator_queue_request(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private,
	struct mdss_rot_entry_container *req)
{
	struct mdss_rot_entry *entry;
	struct mdss_rot_queue *queue;
	unsigned long clk_rate;
	u32 wb_idx;
	int i;

	for (i = 0; i < req->count; i++) {
		entry = req->entries + i;
		queue = entry->queue;
		wb_idx = queue->hw->wb_id;
		mutex_lock(&entry->perf->work_dis_lock);
		entry->perf->work_distribution[wb_idx]++;
		mutex_unlock(&entry->perf->work_dis_lock);
		entry->work_assigned = true;
	}

	clk_rate = mdss_rotator_clk_rate_calc(mgr, private);
	mdss_rotator_set_clk_rate(mgr, clk_rate, MDSS_CLK_ROTATOR_CORE);

	for (i = 0; i < req->count; i++) {
		entry = req->entries + i;
		queue = entry->queue;
		/*
		 * NOTE(review): output_fence is reset here before the
		 * commit work runs — presumably the fence is created
		 * later in the commit path; confirm.
		 */
		entry->output_fence = NULL;
		queue_work(queue->rot_work_queue, &entry->commit_work);
	}
}
1019
/*
 * mdss_rotator_calc_perf() - derive a session's clock rate and
 * bandwidth needs from its configuration
 *
 * clk_rate = width * height * fps / 4 (rotator processes 4 pixels per
 * clock). bw = read + write bandwidth, each scaled for chroma
 * subsampling (420 -> 1.5x) or bpp, then adjusted by the compression
 * ratio. Multiplications are guarded against 32-bit overflow.
 */
static int mdss_rotator_calc_perf(struct mdss_rot_perf *perf)
{
	struct mdp_rotation_config *config = &perf->config;
	u32 read_bw, write_bw;
	struct mdss_mdp_format_params *in_fmt, *out_fmt;

	in_fmt = mdss_mdp_get_format_params(config->input.format);
	if (!in_fmt) {
		pr_err("invalid input format\n");
		return -EINVAL;
	}
	out_fmt = mdss_mdp_get_format_params(config->output.format);
	if (!out_fmt) {
		pr_err("invalid output format\n");
		return -EINVAL;
	}
	/* reject zero width and width*height overflow */
	if (!config->input.width ||
		(0xffffffff/config->input.width < config->input.height))
		return -EINVAL;

	perf->clk_rate = config->input.width * config->input.height;

	/* reject zero pixel count and pixels*fps overflow */
	if (!perf->clk_rate ||
		(0xffffffff/perf->clk_rate < config->frame_rate))
		return -EINVAL;

	perf->clk_rate *= config->frame_rate;
	/* rotator processes 4 pixels per clock */
	perf->clk_rate /= 4;

	read_bw = config->input.width * config->input.height *
		config->frame_rate;
	if (in_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
		read_bw = (read_bw * 3) / 2;
	else
		read_bw *= in_fmt->bpp;

	write_bw = config->output.width * config->output.height *
		config->frame_rate;
	if (out_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
		write_bw = (write_bw * 3) / 2;
	else
		write_bw *= out_fmt->bpp;

	read_bw = apply_comp_ratio_factor(read_bw, in_fmt,
		&config->input.comp_ratio);
	write_bw = apply_comp_ratio_factor(write_bw, out_fmt,
		&config->output.comp_ratio);

	perf->bw = read_bw + write_bw;
	return 0;
}
1072
1073static int mdss_rotator_update_perf(struct mdss_rot_mgr *mgr)
1074{
1075 struct mdss_rot_file_private *priv;
1076 struct mdss_rot_perf *perf;
1077 int not_in_suspend_mode;
1078 u64 total_bw = 0;
1079
1080 ATRACE_BEGIN(__func__);
1081
1082 not_in_suspend_mode = !atomic_read(&mgr->device_suspended);
1083
1084 if (not_in_suspend_mode) {
1085 mutex_lock(&mgr->file_lock);
1086 list_for_each_entry(priv, &mgr->file_list, list) {
1087 mutex_lock(&priv->perf_lock);
1088 list_for_each_entry(perf, &priv->perf_list, list) {
1089 total_bw += perf->bw;
1090 }
1091 mutex_unlock(&priv->perf_lock);
1092 }
1093 mutex_unlock(&mgr->file_lock);
1094 }
1095
1096 mutex_lock(&mgr->bus_lock);
1097 total_bw += mgr->pending_close_bw_vote;
1098 mdss_rotator_enable_reg_bus(mgr, total_bw);
1099 mdss_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
1100 mutex_unlock(&mgr->bus_lock);
1101
1102 ATRACE_END(__func__);
1103 return 0;
1104}
1105
/*
 * mdss_rotator_release_from_work_distribution() - drop this entry's
 * contribution to its wb block's work distribution count
 *
 * If the owning session was already closed (its perf removed from the
 * session list) and this was the session's last outstanding item, the
 * teardown that close offloaded to us is completed here: the pending
 * close bw vote, resources, clocks and the perf allocation itself are
 * released.
 */
static void mdss_rotator_release_from_work_distribution(
	struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	if (entry->work_assigned) {
		bool free_perf = false;
		u32 wb_idx = entry->queue->hw->wb_id;

		/* mgr->lock taken before work_dis_lock — keep this order */
		mutex_lock(&mgr->lock);
		mutex_lock(&entry->perf->work_dis_lock);
		if (entry->perf->work_distribution[wb_idx])
			entry->perf->work_distribution[wb_idx]--;

		if (!entry->perf->work_distribution[wb_idx]
				&& list_empty(&entry->perf->list)) {
			/* close session has offloaded perf free to us */
			free_perf = true;
		}
		mutex_unlock(&entry->perf->work_dis_lock);
		entry->work_assigned = false;
		if (free_perf) {
			mutex_lock(&mgr->bus_lock);
			mgr->pending_close_bw_vote -= entry->perf->bw;
			mutex_unlock(&mgr->bus_lock);
			mdss_rotator_resource_ctrl(mgr, false);
			devm_kfree(&mgr->pdev->dev,
				entry->perf->work_distribution);
			devm_kfree(&mgr->pdev->dev, entry->perf);
			mdss_rotator_update_perf(mgr);
			mdss_rotator_clk_ctrl(mgr, false);
			entry->perf = NULL;
		}
		mutex_unlock(&mgr->lock);
	}
}
1141
/*
 * mdss_rotator_release_entry() - release everything attached to a completed
 * or cancelled rotation entry.
 *
 * Order matters: work-distribution bookkeeping (which may free entry->perf)
 * runs first, then fences, then the imported buffers, and finally the hw
 * queue assignment.
 */
static void mdss_rotator_release_entry(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	mdss_rotator_release_from_work_distribution(mgr, entry);
	mdss_rotator_clear_fence(entry);
	mdss_rotator_release_data(entry);
	mdss_rotator_unassign_queue(mgr, entry);
}
1150
/*
 * mdss_rotator_config_dnsc_factor() - derive and validate the downscale
 * factors for an entry from its source and destination rects.
 *
 * Both factors must be integer powers of two up to 1/64, must be equal
 * (no asymmetric downscale), and downscale is rejected entirely when the
 * hw lacks support or when the output format is UBWC.
 *
 * On failure both factors are reset to 0 (no downscale).
 */
static int mdss_rotator_config_dnsc_factor(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h, bit;
	struct mdp_rotation_item *item = &entry->item;
	struct mdss_mdp_format_params *fmt;

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	/* a 90-degree rotation swaps the destination axes */
	if (item->flags & MDP_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	if (!mgr->has_downscale &&
		(src_w != dst_w || src_h != dst_h)) {
		pr_err("rotator downscale not supported\n");
		ret = -EINVAL;
		goto dnsc_err;
	}

	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	if ((src_w != dst_w) || (src_h != dst_h)) {
		/* only integer ratios are supported */
		if ((src_w % dst_w) || (src_h % dst_h)) {
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_w = src_w / dst_w;
		bit = fls(entry->dnsc_factor_w);
		/*
		 * New Chipsets supports downscale upto 1/64
		 * change the Bit check from 5 to 7 to support 1/64 down scale
		 */
		/* factor must be a single power of two, i.e. exactly one bit */
		if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 7)) {
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		bit = fls(entry->dnsc_factor_h);
		if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 7)) {
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	fmt = mdss_mdp_get_format_params(item->output.format);
	if (mdss_mdp_is_ubwc_format(fmt) &&
		(entry->dnsc_factor_h || entry->dnsc_factor_w)) {
		pr_err("ubwc not supported with downscale %d\n",
			item->output.format);
		ret = -EINVAL;
	}

dnsc_err:

	/* Downscaler does not support asymmetrical dnsc */
	/*
	 * NOTE(review): this check also runs on early-error paths where the
	 * factors were not (re)derived; entries come zeroed from req_init so
	 * stale values are not expected — confirm.
	 */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h)
		ret = -EINVAL;

	if (ret) {
		pr_err("Invalid rotator downscale ratio %dx%d->%dx%d\n",
			src_w, src_h, dst_w, dst_h);
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
1225
1226static bool mdss_rotator_verify_format(struct mdss_rot_mgr *mgr,
1227 struct mdss_mdp_format_params *in_fmt,
1228 struct mdss_mdp_format_params *out_fmt, bool rotation)
1229{
1230 u8 in_v_subsample, in_h_subsample;
1231 u8 out_v_subsample, out_h_subsample;
1232
1233 if (!mgr->has_ubwc && (mdss_mdp_is_ubwc_format(in_fmt) ||
1234 mdss_mdp_is_ubwc_format(out_fmt))) {
1235 pr_err("Rotator doesn't allow ubwc\n");
1236 return -EINVAL;
1237 }
1238
1239 if (!(out_fmt->flag & VALID_ROT_WB_FORMAT)) {
1240 pr_err("Invalid output format\n");
1241 return false;
1242 }
1243
1244 if (in_fmt->is_yuv != out_fmt->is_yuv) {
1245 pr_err("Rotator does not support CSC\n");
1246 return false;
1247 }
1248
1249 /* Forcing same pixel depth */
1250 if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) {
1251 /* Exception is that RGB can drop alpha or add X */
1252 if (in_fmt->is_yuv || out_fmt->alpha_enable ||
1253 (in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) ||
1254 (in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) ||
1255 (in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) {
1256 pr_err("Bit format does not match\n");
1257 return false;
1258 }
1259 }
1260
1261 /* Need to make sure that sub-sampling persists through rotation */
1262 if (rotation) {
1263 mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
1264 &in_v_subsample, &in_h_subsample);
1265 mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
1266 &out_v_subsample, &out_h_subsample);
1267
1268 if ((in_v_subsample != out_h_subsample) ||
1269 (in_h_subsample != out_v_subsample)) {
1270 pr_err("Rotation has invalid subsampling\n");
1271 return false;
1272 }
1273 } else {
1274 if (in_fmt->chroma_sample != out_fmt->chroma_sample) {
1275 pr_err("Format subsampling mismatch\n");
1276 return false;
1277 }
1278 }
1279
1280 pr_debug("in_fmt=%0d, out_fmt=%d, has_ubwc=%d\n",
1281 in_fmt->format, out_fmt->format, mgr->has_ubwc);
1282 return true;
1283}
1284
/*
 * mdss_rotator_verify_config() - validate a session configuration coming
 * from userspace (open/config session ioctls).
 *
 * Checks that both formats are known, that image dimensions are divisible
 * by their chroma subsample rates, and that the format pair is valid for
 * the requested rotation.
 *
 * Return: 0 on success, -EINVAL on any validation failure.
 */
static int mdss_rotator_verify_config(struct mdss_rot_mgr *mgr,
	struct mdp_rotation_config *config)
{
	struct mdss_mdp_format_params *in_fmt, *out_fmt;
	u8 in_v_subsample, in_h_subsample;
	u8 out_v_subsample, out_h_subsample;
	u32 input, output;
	bool rotation;

	input = config->input.format;
	output = config->output.format;
	rotation = (config->flags & MDP_ROTATION_90) ? true : false;

	in_fmt = mdss_mdp_get_format_params(input);
	if (!in_fmt) {
		pr_err("Unrecognized input format:%u\n", input);
		return -EINVAL;
	}

	out_fmt = mdss_mdp_get_format_params(output);
	if (!out_fmt) {
		pr_err("Unrecognized output format:%u\n", output);
		return -EINVAL;
	}

	mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
		&in_v_subsample, &in_h_subsample);
	mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
		&out_v_subsample, &out_h_subsample);

	/* Dimension of image needs to be divisible by subsample rate  */
	if ((config->input.height % in_v_subsample) ||
			(config->input.width % in_h_subsample)) {
		pr_err("In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
			config->input.width, config->input.height,
			in_v_subsample, in_h_subsample);
		return -EINVAL;
	}

	if ((config->output.height % out_v_subsample) ||
			(config->output.width % out_h_subsample)) {
		pr_err("Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
			config->output.width, config->output.height,
			out_v_subsample, out_h_subsample);
		return -EINVAL;
	}

	if (!mdss_rotator_verify_format(mgr, in_fmt,
			out_fmt, rotation)) {
		pr_err("Rot format pairing invalid, in_fmt:%d, out_fmt:%d\n",
			input, output);
		return -EINVAL;
	}

	return 0;
}
1341
1342static int mdss_rotator_validate_item_matches_session(
1343 struct mdp_rotation_config *config, struct mdp_rotation_item *item)
1344{
1345 int ret;
1346
1347 ret = __compare_session_item_rect(&config->input,
1348 &item->src_rect, item->input.format, true);
1349 if (ret)
1350 return ret;
1351
1352 ret = __compare_session_item_rect(&config->output,
1353 &item->dst_rect, item->output.format, false);
1354 if (ret)
1355 return ret;
1356
1357 ret = __compare_session_rotations(config->flags, item->flags);
1358 if (ret)
1359 return ret;
1360
1361 return 0;
1362}
1363
1364static int mdss_rotator_validate_img_roi(struct mdp_rotation_item *item)
1365{
1366 struct mdss_mdp_format_params *fmt;
1367 uint32_t width, height;
1368 int ret = 0;
1369
1370 width = item->input.width;
1371 height = item->input.height;
1372 if (item->flags & MDP_ROTATION_DEINTERLACE) {
1373 width *= 2;
1374 height /= 2;
1375 }
1376
1377 /* Check roi bounds */
1378 if (ROT_CHECK_BOUNDS(item->src_rect.x, item->src_rect.w, width) ||
1379 ROT_CHECK_BOUNDS(item->src_rect.y, item->src_rect.h,
1380 height)) {
1381 pr_err("invalid src flag=%08x img wh=%dx%d rect=%d,%d,%d,%d\n",
1382 item->flags, width, height, item->src_rect.x,
1383 item->src_rect.y, item->src_rect.w, item->src_rect.h);
1384 return -EINVAL;
1385 }
1386 if (ROT_CHECK_BOUNDS(item->dst_rect.x, item->dst_rect.w,
1387 item->output.width) ||
1388 ROT_CHECK_BOUNDS(item->dst_rect.y, item->dst_rect.h,
1389 item->output.height)) {
1390 pr_err("invalid dst img wh=%dx%d rect=%d,%d,%d,%d\n",
1391 item->output.width, item->output.height,
1392 item->dst_rect.x, item->dst_rect.y, item->dst_rect.w,
1393 item->dst_rect.h);
1394 return -EINVAL;
1395 }
1396
1397 fmt = mdss_mdp_get_format_params(item->output.format);
1398 if (!fmt) {
1399 pr_err("invalid output format:%d\n", item->output.format);
1400 return -EINVAL;
1401 }
1402
1403 if (mdss_mdp_is_ubwc_format(fmt))
1404 ret = mdss_mdp_validate_offset_for_ubwc_format(fmt,
1405 item->dst_rect.x, item->dst_rect.y);
1406
1407 return ret;
1408}
1409
1410static int mdss_rotator_validate_fmt_and_item_flags(
1411 struct mdp_rotation_config *config, struct mdp_rotation_item *item)
1412{
1413 struct mdss_mdp_format_params *fmt;
1414
1415 fmt = mdss_mdp_get_format_params(item->input.format);
1416 if ((item->flags & MDP_ROTATION_DEINTERLACE) &&
1417 mdss_mdp_is_ubwc_format(fmt)) {
1418 pr_err("cannot perform mdp deinterlace on tiled formats\n");
1419 return -EINVAL;
1420 }
1421 return 0;
1422}
1423
/*
 * mdss_rotator_validate_entry() - full validation of one rotation entry
 * before it is imported and queued.
 *
 * Checks writeback index sanity, that the entry belongs to an open session,
 * that it matches that session's configuration, that its ROIs and flags are
 * legal, and derives the downscale factors.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mdss_rotator_validate_entry(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private,
	struct mdss_rot_entry *entry)
{
	int ret;
	struct mdp_rotation_item *item;
	struct mdss_rot_perf *perf;

	item = &entry->item;

	if (item->wb_idx != item->pipe_idx) {
		pr_err("invalid writeback and pipe idx\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): '>' permits wb_idx == queue_count; confirm whether
	 * queue indexing here is intended to be 1-based.
	 */
	if (item->wb_idx != MDSS_ROTATION_HW_ANY &&
		item->wb_idx > mgr->queue_count) {
		pr_err("invalid writeback idx\n");
		return -EINVAL;
	}

	perf = mdss_rotator_find_session(private, item->session_id);
	if (!perf) {
		pr_err("Could not find session:%u\n", item->session_id);
		return -EINVAL;
	}

	ret = mdss_rotator_validate_item_matches_session(&perf->config, item);
	if (ret) {
		pr_err("Work item does not match session:%u\n",
			item->session_id);
		return ret;
	}

	ret = mdss_rotator_validate_img_roi(item);
	if (ret) {
		pr_err("Image roi is invalid\n");
		return ret;
	}

	ret = mdss_rotator_validate_fmt_and_item_flags(&perf->config, item);
	if (ret)
		return ret;

	ret = mdss_rotator_config_dnsc_factor(mgr, entry);
	if (ret) {
		pr_err("fail to configure downscale factor\n");
		return ret;
	}
	return ret;
}
1475
1476/*
1477 * Upon failure from the function, caller needs to make sure
1478 * to call mdss_rotator_remove_request to clean up resources.
1479 */
1480static int mdss_rotator_add_request(struct mdss_rot_mgr *mgr,
1481 struct mdss_rot_file_private *private,
1482 struct mdss_rot_entry_container *req)
1483{
1484 struct mdss_rot_entry *entry;
1485 struct mdp_rotation_item *item;
1486 u32 flag = 0;
1487 int i, ret;
1488
1489 for (i = 0; i < req->count; i++) {
1490 entry = req->entries + i;
1491 item = &entry->item;
1492
1493 if (item->flags & MDP_ROTATION_SECURE)
1494 flag = MDP_SECURE_OVERLAY_SESSION;
1495
1496 ret = mdss_rotator_validate_entry(mgr, private, entry);
1497 if (ret) {
1498 pr_err("fail to validate the entry\n");
1499 return ret;
1500 }
1501
1502 ret = mdss_rotator_import_data(mgr, entry);
1503 if (ret) {
1504 pr_err("fail to import the data\n");
1505 return ret;
1506 }
1507
1508 if (item->input.fence >= 0) {
Sachin Bhayare2b6d0042018-01-13 19:38:21 +05301509 entry->input_fence = mdss_get_fd_sync_fence(
1510 item->input.fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301511 if (!entry->input_fence) {
1512 pr_err("invalid input fence fd\n");
1513 return -EINVAL;
1514 }
1515 }
1516
1517 ret = mdss_rotator_assign_queue(mgr, entry, private);
1518 if (ret) {
1519 pr_err("fail to assign queue to entry\n");
1520 return ret;
1521 }
1522
1523 entry->request = req;
1524
1525 INIT_WORK(&entry->commit_work, mdss_rotator_wq_handler);
1526
1527 ret = mdss_rotator_create_fence(entry);
1528 if (ret) {
1529 pr_err("fail to create fence\n");
1530 return ret;
1531 }
1532 item->output.fence = entry->output_fence_fd;
1533
1534 pr_debug("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
1535 "dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
1536 item->src_rect.x, item->src_rect.y,
1537 item->src_rect.w, item->src_rect.h, item->input.format,
1538 item->dst_rect.x, item->dst_rect.y,
1539 item->dst_rect.w, item->dst_rect.h, item->output.format,
1540 item->session_id);
1541 }
1542
1543 mutex_lock(&private->req_lock);
1544 list_add(&req->list, &private->req_list);
1545 mutex_unlock(&private->req_lock);
1546
1547 return 0;
1548}
1549
1550static void mdss_rotator_remove_request(struct mdss_rot_mgr *mgr,
1551 struct mdss_rot_file_private *private,
1552 struct mdss_rot_entry_container *req)
1553{
1554 int i;
1555
1556 mutex_lock(&private->req_lock);
1557 for (i = 0; i < req->count; i++)
1558 mdss_rotator_release_entry(mgr, req->entries + i);
1559 list_del_init(&req->list);
1560 mutex_unlock(&private->req_lock);
1561}
1562
/* This function should be called with req_lock */
/*
 * mdss_rotator_cancel_request() - cancel every entry of a request, signal
 * its output fences in order, then free the container.
 */
static void mdss_rotator_cancel_request(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry_container *req)
{
	struct mdss_rot_entry *entry;
	int i;

	/*
	 * To avoid signal the rotation entry output fence in the wrong
	 * order, all the entries in the same request needs to be cancelled
	 * first, before signaling the output fence.
	 */
	for (i = req->count - 1; i >= 0; i--) {
		entry = req->entries + i;
		cancel_work_sync(&entry->commit_work);
	}

	for (i = req->count - 1; i >= 0; i--) {
		entry = req->entries + i;
		mdss_rotator_signal_output(entry);
		mdss_rotator_release_entry(mgr, entry);
	}

	list_del_init(&req->list);
	devm_kfree(&mgr->pdev->dev, req);
}
1589
1590static void mdss_rotator_cancel_all_requests(struct mdss_rot_mgr *mgr,
1591 struct mdss_rot_file_private *private)
1592{
1593 struct mdss_rot_entry_container *req, *req_next;
1594
1595 pr_debug("Canceling all rotator requests\n");
1596
1597 mutex_lock(&private->req_lock);
1598 list_for_each_entry_safe(req, req_next, &private->req_list, list)
1599 mdss_rotator_cancel_request(mgr, req);
1600 mutex_unlock(&private->req_lock);
1601}
1602
1603static void mdss_rotator_free_competed_request(struct mdss_rot_mgr *mgr,
1604 struct mdss_rot_file_private *private)
1605{
1606 struct mdss_rot_entry_container *req, *req_next;
1607
1608 mutex_lock(&private->req_lock);
1609 list_for_each_entry_safe(req, req_next, &private->req_list, list) {
1610 if (atomic_read(&req->pending_count) == 0) {
1611 list_del_init(&req->list);
1612 devm_kfree(&mgr->pdev->dev, req);
1613 }
1614 }
1615 mutex_unlock(&private->req_lock);
1616}
1617
1618static void mdss_rotator_release_rotator_perf_session(
1619 struct mdss_rot_mgr *mgr,
1620 struct mdss_rot_file_private *private)
1621{
1622 struct mdss_rot_perf *perf, *perf_next;
1623
1624 pr_debug("Releasing all rotator request\n");
1625 mdss_rotator_cancel_all_requests(mgr, private);
1626
1627 mutex_lock(&private->perf_lock);
1628 list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
1629 list_del_init(&perf->list);
1630 devm_kfree(&mgr->pdev->dev, perf->work_distribution);
1631 devm_kfree(&mgr->pdev->dev, perf);
1632 }
1633 mutex_unlock(&private->perf_lock);
1634}
1635
1636static void mdss_rotator_release_all(struct mdss_rot_mgr *mgr)
1637{
1638 struct mdss_rot_file_private *priv, *priv_next;
1639
1640 mutex_lock(&mgr->file_lock);
1641 list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
1642 mdss_rotator_release_rotator_perf_session(mgr, priv);
1643 mdss_rotator_resource_ctrl(mgr, false);
1644 list_del_init(&priv->list);
1645 priv->file->private_data = NULL;
1646 devm_kfree(&mgr->pdev->dev, priv);
1647 }
1648 mutex_unlock(&rot_mgr->file_lock);
1649
1650 mdss_rotator_update_perf(mgr);
1651}
1652
1653static int mdss_rotator_prepare_hw(struct mdss_rot_hw_resource *hw,
1654 struct mdss_rot_entry *entry)
1655{
1656 struct mdss_mdp_pipe *pipe;
1657 struct mdss_mdp_ctl *orig_ctl, *rot_ctl;
1658 int ret;
1659
1660 pipe = hw->pipe;
1661 orig_ctl = pipe->mixer_left->ctl;
1662 if (orig_ctl->shared_lock)
1663 mutex_lock(orig_ctl->shared_lock);
1664
1665 rot_ctl = mdss_mdp_ctl_mixer_switch(orig_ctl,
1666 MDSS_MDP_WB_CTL_TYPE_BLOCK);
1667 if (!rot_ctl) {
1668 ret = -EINVAL;
1669 goto error;
1670 } else {
1671 hw->ctl = rot_ctl;
1672 pipe->mixer_left = rot_ctl->mixer_left;
1673 }
1674
1675 return 0;
1676
1677error:
1678 if (orig_ctl->shared_lock)
1679 mutex_unlock(orig_ctl->shared_lock);
1680 return ret;
1681}
1682
1683static void mdss_rotator_translate_rect(struct mdss_rect *dst,
1684 struct mdp_rect *src)
1685{
1686 dst->x = src->x;
1687 dst->y = src->y;
1688 dst->w = src->w;
1689 dst->h = src->h;
1690}
1691
1692static u32 mdss_rotator_translate_flags(u32 input)
1693{
1694 u32 output = 0;
1695
1696 if (input & MDP_ROTATION_NOP)
1697 output |= MDP_ROT_NOP;
1698 if (input & MDP_ROTATION_FLIP_LR)
1699 output |= MDP_FLIP_LR;
1700 if (input & MDP_ROTATION_FLIP_UD)
1701 output |= MDP_FLIP_UD;
1702 if (input & MDP_ROTATION_90)
1703 output |= MDP_ROT_90;
1704 if (input & MDP_ROTATION_DEINTERLACE)
1705 output |= MDP_DEINTERLACE;
1706 if (input & MDP_ROTATION_SECURE)
1707 output |= MDP_SECURE_OVERLAY_SESSION;
1708 if (input & MDP_ROTATION_BWC_EN)
1709 output |= MDP_BWC_EN;
1710
1711 return output;
1712}
1713
/*
 * mdss_rotator_config_hw() - program the source pipe for one rotation entry
 * and queue its source buffer.
 *
 * Return: 0 on success, negative errno on smp reserve / scaling / queue
 * failure.
 */
static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw,
	struct mdss_rot_entry *entry)
{
	struct mdss_mdp_pipe *pipe;
	struct mdp_rotation_item *item;
	struct mdss_rot_perf *perf;
	int ret;

	ATRACE_BEGIN(__func__);
	pipe = hw->pipe;
	item = &entry->item;
	perf = entry->perf;

	pipe->flags = mdss_rotator_translate_flags(item->flags);
	pipe->src_fmt = mdss_mdp_get_format_params(item->input.format);
	pipe->img_width = item->input.width;
	pipe->img_height = item->input.height;
	mdss_rotator_translate_rect(&pipe->src, &item->src_rect);
	/*
	 * NOTE(review): pipe->dst is deliberately set from src_rect (not
	 * dst_rect) — the rotation/scale to dst_rect appears to be handled
	 * by the writeback path; confirm against the wb configuration.
	 */
	mdss_rotator_translate_rect(&pipe->dst, &item->src_rect);
	pipe->scaler.enable = 0;
	pipe->frame_rate = perf->config.frame_rate;

	pipe->params_changed++;

	/* drop any previous smp allocation before reserving for this job */
	mdss_mdp_smp_release(pipe);

	ret = mdss_mdp_smp_reserve(pipe);
	if (ret) {
		pr_err("unable to mdss_mdp_smp_reserve rot data\n");
		goto done;
	}

	ret = mdss_mdp_overlay_setup_scaling(pipe);
	if (ret) {
		pr_err("scaling setup failed %d\n", ret);
		goto done;
	}

	ret = mdss_mdp_pipe_queue_data(pipe, &entry->src_buf);
	pr_debug("Config pipe. src{%u,%u,%u,%u}f=%u\n"
		"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
		item->src_rect.x, item->src_rect.y,
		item->src_rect.w, item->src_rect.h, item->input.format,
		item->dst_rect.x, item->dst_rect.y,
		item->dst_rect.w, item->dst_rect.h, item->output.format,
		item->session_id);
	MDSS_XLOG(item->input.format, pipe->img_width, pipe->img_height,
		pipe->flags);
done:
	ATRACE_END(__func__);
	return ret;
}
1766
1767static int mdss_rotator_kickoff_entry(struct mdss_rot_hw_resource *hw,
1768 struct mdss_rot_entry *entry)
1769{
1770 int ret;
1771 struct mdss_mdp_writeback_arg wb_args = {
1772 .data = &entry->dst_buf,
1773 .priv_data = entry,
1774 };
1775
1776 ret = mdss_mdp_writeback_display_commit(hw->ctl, &wb_args);
1777 return ret;
1778}
1779
1780static int mdss_rotator_wait_for_entry(struct mdss_rot_hw_resource *hw,
1781 struct mdss_rot_entry *entry)
1782{
1783 int ret;
1784 struct mdss_mdp_ctl *ctl = hw->ctl;
1785
1786 ret = mdss_mdp_display_wait4comp(ctl);
1787 if (ctl->shared_lock)
1788 mutex_unlock(ctl->shared_lock);
1789 return ret;
1790}
1791
/*
 * mdss_rotator_commit_entry() - run one entry through the full hw sequence:
 * prepare (ctl switch), configure pipe, kickoff, and wait for completion.
 *
 * Each stage returns on its own failure; the shared ctl lock taken in
 * prepare is released either by the prepare error path or by wait_for_entry.
 */
static int mdss_rotator_commit_entry(struct mdss_rot_hw_resource *hw,
	struct mdss_rot_entry *entry)
{
	int ret;

	ret = mdss_rotator_prepare_hw(hw, entry);
	if (ret) {
		pr_err("fail to prepare hw resource %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_config_hw(hw, entry);
	if (ret) {
		pr_err("fail to configure hw resource %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_kickoff_entry(hw, entry);
	if (ret) {
		pr_err("fail to do kickoff %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_wait_for_entry(hw, entry);
	if (ret) {
		pr_err("fail to wait for completion %d\n", ret);
		return ret;
	}

	return ret;
}
1823
/*
 * mdss_rotator_handle_entry() - process one entry end to end: wait on its
 * input acquire fence, map/validate its buffers, then commit on hw.
 */
static int mdss_rotator_handle_entry(struct mdss_rot_hw_resource *hw,
	struct mdss_rot_entry *entry)
{
	int ret;

	ret = mdss_rotator_wait_for_input(entry);
	if (ret) {
		pr_err("wait for input buffer failed %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_map_and_check_data(entry);
	if (ret) {
		pr_err("fail to prepare input/output data %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_commit_entry(hw, entry);
	if (ret)
		pr_err("rotator commit failed %d\n", ret);

	return ret;
}
1847
/*
 * mdss_rotator_wq_handler() - workqueue handler that executes one queued
 * rotation entry.
 *
 * Regardless of success, hw acquisition failure included, the output fence
 * is always signaled and the entry released so userspace never waits
 * forever; the request's pending_count is then decremented.
 */
static void mdss_rotator_wq_handler(struct work_struct *work)
{
	struct mdss_rot_entry *entry;
	struct mdss_rot_entry_container *request;
	struct mdss_rot_hw_resource *hw;
	int ret;

	entry = container_of(work, struct mdss_rot_entry, commit_work);
	request = entry->request;

	if (!request) {
		pr_err("fatal error, no request with entry\n");
		return;
	}

	/* may block until a hw block for this queue becomes available */
	hw = mdss_rotator_get_hw_resource(entry->queue, entry);
	if (!hw) {
		pr_err("no hw for the queue\n");
		goto get_hw_res_err;
	}

	ret = mdss_rotator_handle_entry(hw, entry);
	if (ret) {
		struct mdp_rotation_item *item = &entry->item;

		pr_err("Rot req fail. src{%u,%u,%u,%u}f=%u\n"
		"dst{%u,%u,%u,%u}f=%u session_id=%u, wbidx%d, pipe_id=%d\n",
			item->src_rect.x, item->src_rect.y,
			item->src_rect.w, item->src_rect.h, item->input.format,
			item->dst_rect.x, item->dst_rect.y,
			item->dst_rect.w, item->dst_rect.h, item->output.format,
			item->session_id, item->wb_idx, item->pipe_idx);
	}

	mdss_rotator_put_hw_resource(entry->queue, hw);

get_hw_res_err:
	mdss_rotator_signal_output(entry);
	/* uses the global manager; workqueue context has no mgr argument */
	mdss_rotator_release_entry(rot_mgr, entry);
	atomic_dec(&request->pending_count);
}
1889
1890static int mdss_rotator_validate_request(struct mdss_rot_mgr *mgr,
1891 struct mdss_rot_file_private *private,
1892 struct mdss_rot_entry_container *req)
1893{
1894 int i, ret = 0;
1895 struct mdss_rot_entry *entry;
1896
1897 for (i = 0; i < req->count; i++) {
1898 entry = req->entries + i;
1899 ret = mdss_rotator_validate_entry(mgr, private,
1900 entry);
1901 if (ret) {
1902 pr_err("fail to validate the entry\n");
1903 return ret;
1904 }
1905 }
1906
1907 return ret;
1908}
1909
1910static u32 mdss_rotator_generator_session_id(struct mdss_rot_mgr *mgr)
1911{
1912 u32 id;
1913
1914 mutex_lock(&mgr->lock);
1915 id = mgr->session_id_generator++;
1916 mutex_unlock(&mgr->lock);
1917 return id;
1918}
1919
1920static int mdss_rotator_open_session(struct mdss_rot_mgr *mgr,
1921 struct mdss_rot_file_private *private, unsigned long arg)
1922{
1923 struct mdp_rotation_config config;
1924 struct mdss_rot_perf *perf;
1925 int ret;
1926
1927 ret = copy_from_user(&config, (void __user *)arg, sizeof(config));
1928 if (ret) {
1929 pr_err("fail to copy session data\n");
1930 return ret;
1931 }
1932
1933 ret = mdss_rotator_verify_config(mgr, &config);
1934 if (ret) {
1935 pr_err("Rotator verify format failed\n");
1936 return ret;
1937 }
1938
1939 perf = devm_kzalloc(&mgr->pdev->dev, sizeof(*perf), GFP_KERNEL);
1940 if (!perf)
1941 return -ENOMEM;
1942
1943 ATRACE_BEGIN(__func__); /* Open session votes for bw */
1944 perf->work_distribution = devm_kzalloc(&mgr->pdev->dev,
1945 sizeof(u32) * mgr->queue_count, GFP_KERNEL);
1946 if (!perf->work_distribution) {
1947 ret = -ENOMEM;
1948 goto alloc_err;
1949 }
1950
1951 config.session_id = mdss_rotator_generator_session_id(mgr);
1952 perf->config = config;
1953 perf->last_wb_idx = -1;
1954 mutex_init(&perf->work_dis_lock);
1955
1956 INIT_LIST_HEAD(&perf->list);
1957
1958 ret = mdss_rotator_calc_perf(perf);
1959 if (ret) {
1960 pr_err("error setting the session%d\n", ret);
1961 goto copy_user_err;
1962 }
1963
1964 ret = copy_to_user((void *)arg, &config, sizeof(config));
1965 if (ret) {
1966 pr_err("fail to copy to user\n");
1967 goto copy_user_err;
1968 }
1969
1970 mutex_lock(&private->perf_lock);
1971 list_add(&perf->list, &private->perf_list);
1972 mutex_unlock(&private->perf_lock);
1973
1974 ret = mdss_rotator_resource_ctrl(mgr, true);
1975 if (ret) {
1976 pr_err("Failed to aqcuire rotator resources\n");
1977 goto resource_err;
1978 }
1979
1980 mdss_rotator_clk_ctrl(rot_mgr, true);
1981 ret = mdss_rotator_update_perf(mgr);
1982 if (ret) {
1983 pr_err("fail to open session, not enough clk/bw\n");
1984 goto perf_err;
1985 }
1986 pr_debug("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
1987 config.session_id, config.input.width, config.input.height,
1988 config.input.format, config.output.width, config.output.height,
1989 config.output.format);
1990
1991 goto done;
1992perf_err:
1993 mdss_rotator_clk_ctrl(rot_mgr, false);
1994 mdss_rotator_resource_ctrl(mgr, false);
1995resource_err:
1996 mutex_lock(&private->perf_lock);
1997 list_del_init(&perf->list);
1998 mutex_unlock(&private->perf_lock);
1999copy_user_err:
2000 devm_kfree(&mgr->pdev->dev, perf->work_distribution);
2001alloc_err:
2002 devm_kfree(&mgr->pdev->dev, perf);
2003done:
2004 ATRACE_END(__func__);
2005 return ret;
2006}
2007
/*
 * mdss_rotator_close_session() - ioctl handler that closes a session by id.
 *
 * If work items are still in flight, the session's bw is parked in
 * pending_close_bw_vote and the final perf free is offloaded to
 * mdss_rotator_release_from_work_distribution() (which detects the session
 * via list_empty on the perf list). Otherwise the perf is torn down here.
 *
 * Lock order: mgr->lock -> private->perf_lock -> perf->work_dis_lock.
 */
static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private, unsigned long arg)
{
	struct mdss_rot_perf *perf;
	bool offload_release_work = false;
	u32 id;

	id = (u32)arg;
	mutex_lock(&mgr->lock);
	mutex_lock(&private->perf_lock);
	perf = __mdss_rotator_find_session(private, id);
	if (!perf) {
		mutex_unlock(&private->perf_lock);
		mutex_unlock(&mgr->lock);
		pr_err("Trying to close session that does not exist\n");
		return -EINVAL;
	}

	ATRACE_BEGIN(__func__);
	mutex_lock(&perf->work_dis_lock);
	if (mdss_rotator_is_work_pending(mgr, perf)) {
		pr_debug("Work is still pending, offload free to wq\n");
		mutex_lock(&mgr->bus_lock);
		mgr->pending_close_bw_vote += perf->bw;
		mutex_unlock(&mgr->bus_lock);
		offload_release_work = true;
	}
	/* empty perf->list is the signal the wq path uses to free us */
	list_del_init(&perf->list);
	mutex_unlock(&perf->work_dis_lock);
	mutex_unlock(&private->perf_lock);

	if (offload_release_work)
		goto done;

	mdss_rotator_resource_ctrl(mgr, false);
	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
	devm_kfree(&mgr->pdev->dev, perf);
	mdss_rotator_update_perf(mgr);
	/* NOTE(review): uses global rot_mgr rather than mgr — confirm same */
	mdss_rotator_clk_ctrl(rot_mgr, false);
done:
	pr_debug("Closed session id:%u", id);
	ATRACE_END(__func__);
	mutex_unlock(&mgr->lock);
	return 0;
}
2053
/*
 * mdss_rotator_config_session() - ioctl handler that reconfigures an
 * existing session and recomputes its bw/clk requirement.
 *
 * Return: 0 on success; -EINVAL for unknown session or bad config; errno
 * from perf calculation/update otherwise.
 */
static int mdss_rotator_config_session(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private, unsigned long arg)
{
	int ret = 0;
	struct mdss_rot_perf *perf;
	struct mdp_rotation_config config;

	ret = copy_from_user(&config, (void __user *)arg,
			sizeof(config));
	if (ret) {
		pr_err("fail to copy session data\n");
		return ret;
	}

	ret = mdss_rotator_verify_config(mgr, &config);
	if (ret) {
		pr_err("Rotator verify format failed\n");
		return ret;
	}

	mutex_lock(&mgr->lock);
	perf = mdss_rotator_find_session(private, config.session_id);
	if (!perf) {
		pr_err("No session with id=%u could be found\n",
			config.session_id);
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	ATRACE_BEGIN(__func__);
	mutex_lock(&private->perf_lock);
	perf->config = config;
	ret = mdss_rotator_calc_perf(perf);
	mutex_unlock(&private->perf_lock);

	if (ret) {
		pr_err("error in configuring the session %d\n", ret);
		goto done;
	}

	ret = mdss_rotator_update_perf(mgr);

	pr_debug("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
		config.session_id, config.input.width, config.input.height,
		config.input.format, config.output.width, config.output.height,
		config.output.format);
done:
	ATRACE_END(__func__);
	mutex_unlock(&mgr->lock);
	return ret;
}
2105
/*
 * mdss_rotator_req_init() - allocate a request container with its entry
 * array in one devm allocation and seed each entry from the userspace items.
 *
 * Return: valid pointer, ERR_PTR(-EINVAL) on bad plane counts, or
 * ERR_PTR(-ENOMEM). Never returns NULL.
 */
struct mdss_rot_entry_container *mdss_rotator_req_init(
	struct mdss_rot_mgr *mgr, struct mdp_rotation_item *items,
	u32 count, u32 flags)
{
	struct mdss_rot_entry_container *req;
	int size, i;

	/*
	 * Check input and output plane_count from each given item
	 * are within the MAX_PLANES limit
	 */
	for (i = 0 ; i < count; i++) {
		if ((items[i].input.plane_count > MAX_PLANES) ||
				(items[i].output.plane_count > MAX_PLANES)) {
			pr_err("Input/Output plane_count exceeds MAX_PLANES limit, input:%d, output:%d\n",
					items[i].input.plane_count,
					items[i].output.plane_count);
			return ERR_PTR(-EINVAL);
		}
	}

	/* container and entry array share a single allocation */
	size = sizeof(struct mdss_rot_entry_container);
	size += sizeof(struct mdss_rot_entry) * count;
	req = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);

	if (!req)
		return ERR_PTR(-ENOMEM);


	INIT_LIST_HEAD(&req->list);
	req->count = count;
	req->entries = (struct mdss_rot_entry *)
		((void *)req + sizeof(struct mdss_rot_entry_container));
	req->flags = flags;
	atomic_set(&req->pending_count, count);

	for (i = 0; i < count; i++)
		req->entries[i].item = items[i];

	return req;
}
2147
/*
 * mdss_rotator_handle_request_common() - reap finished requests, add the
 * new one, and copy the created output fence fds back into the caller's
 * item array so they can be returned to userspace in one copy.
 */
static int mdss_rotator_handle_request_common(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private,
	struct mdss_rot_entry_container *req,
	struct mdp_rotation_item *items)
{
	int i, ret;

	mdss_rotator_free_competed_request(mgr, private);

	ret = mdss_rotator_add_request(mgr, private, req);
	if (ret) {
		pr_err("fail to add rotation request\n");
		/* add_request contract: caller must clean up on failure */
		mdss_rotator_remove_request(mgr, private, req);
		return ret;
	}

	for (i = 0; i < req->count; i++)
		items[i].output.fence =
			req->entries[i].item.output.fence;

	return ret;
}
2170
2171static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
2172 struct mdss_rot_file_private *private, unsigned long arg)
2173{
2174 struct mdp_rotation_request user_req;
2175 struct mdp_rotation_item *items = NULL;
2176 struct mdss_rot_entry_container *req = NULL;
2177 int size, ret;
2178 uint32_t req_count;
2179 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2180
2181 if (mdata->handoff_pending) {
2182 pr_err("Rotator request failed. Handoff pending\n");
2183 return -EPERM;
2184 }
2185
2186 if (mdss_get_sd_client_cnt()) {
2187 pr_err("rot request not permitted during secure display session\n");
2188 return -EPERM;
2189 }
2190
2191 ret = copy_from_user(&user_req, (void __user *)arg,
2192 sizeof(user_req));
2193 if (ret) {
2194 pr_err("fail to copy rotation request\n");
2195 return ret;
2196 }
2197
2198 req_count = user_req.count;
2199 if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
2200 pr_err("invalid rotator req count :%d\n", req_count);
2201 return -EINVAL;
2202 }
2203
2204 /*
2205 * here, we make a copy of the items so that we can copy
2206 * all the output fences to the client in one call. Otherwise,
2207 * we will have to call multiple copy_to_user
2208 */
2209 size = sizeof(struct mdp_rotation_item) * req_count;
2210 items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
2211 if (!items) {
2212 pr_err("fail to allocate rotation items\n");
2213 return -ENOMEM;
2214 }
2215 ret = copy_from_user(items, user_req.list, size);
2216 if (ret) {
2217 pr_err("fail to copy rotation items\n");
2218 goto handle_request_err;
2219 }
2220
2221 req = mdss_rotator_req_init(mgr, items, user_req.count, user_req.flags);
2222 if (IS_ERR_OR_NULL(req)) {
2223 pr_err("fail to allocate rotation request\n");
2224 ret = PTR_ERR(req);
2225 goto handle_request_err;
2226 }
2227
2228 mutex_lock(&mgr->lock);
2229
2230 if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
2231 ret = mdss_rotator_validate_request(mgr, private, req);
2232 goto handle_request_err1;
2233 }
2234
2235 ret = mdss_rotator_handle_request_common(mgr, private, req, items);
2236 if (ret) {
2237 pr_err("fail to handle request\n");
2238 goto handle_request_err1;
2239 }
2240
2241 ret = copy_to_user(user_req.list, items, size);
2242 if (ret) {
2243 pr_err("fail to copy output fence to user\n");
2244 mdss_rotator_remove_request(mgr, private, req);
2245 goto handle_request_err1;
2246 }
2247
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302248 mdss_rotator_queue_request(mgr, private, req);
2249
2250 mutex_unlock(&mgr->lock);
2251
2252 devm_kfree(&mgr->pdev->dev, items);
2253 return ret;
2254
2255handle_request_err1:
2256 mutex_unlock(&mgr->lock);
2257handle_request_err:
2258 devm_kfree(&mgr->pdev->dev, items);
2259 devm_kfree(&mgr->pdev->dev, req);
2260 return ret;
2261}
2262
2263static int mdss_rotator_open(struct inode *inode, struct file *file)
2264{
2265 struct mdss_rot_file_private *private;
2266
2267 if (!rot_mgr)
2268 return -ENODEV;
2269
2270 if (atomic_read(&rot_mgr->device_suspended))
2271 return -EPERM;
2272
2273 private = devm_kzalloc(&rot_mgr->pdev->dev, sizeof(*private),
2274 GFP_KERNEL);
2275 if (!private)
2276 return -ENOMEM;
2277
2278 mutex_init(&private->req_lock);
2279 mutex_init(&private->perf_lock);
2280 INIT_LIST_HEAD(&private->req_list);
2281 INIT_LIST_HEAD(&private->perf_list);
2282 INIT_LIST_HEAD(&private->list);
2283
2284 mutex_lock(&rot_mgr->file_lock);
2285 list_add(&private->list, &rot_mgr->file_list);
2286 file->private_data = private;
2287 private->file = file;
2288 mutex_unlock(&rot_mgr->file_lock);
2289
2290 return 0;
2291}
2292
2293static bool mdss_rotator_file_priv_allowed(struct mdss_rot_mgr *mgr,
2294 struct mdss_rot_file_private *priv)
2295{
2296 struct mdss_rot_file_private *_priv, *_priv_next;
2297 bool ret = false;
2298
2299 mutex_lock(&mgr->file_lock);
2300 list_for_each_entry_safe(_priv, _priv_next, &mgr->file_list, list) {
2301 if (_priv == priv) {
2302 ret = true;
2303 break;
2304 }
2305 }
2306 mutex_unlock(&mgr->file_lock);
2307 return ret;
2308}
2309
2310static int mdss_rotator_close(struct inode *inode, struct file *file)
2311{
2312 struct mdss_rot_file_private *private;
2313
2314 if (!rot_mgr)
2315 return -ENODEV;
2316
2317 if (!file->private_data)
2318 return -EINVAL;
2319
2320 private = (struct mdss_rot_file_private *)file->private_data;
2321
2322 if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
2323 pr_err("Calling close with unrecognized rot_file_private\n");
2324 return -EINVAL;
2325 }
2326
2327 mdss_rotator_release_rotator_perf_session(rot_mgr, private);
2328
2329 mutex_lock(&rot_mgr->file_lock);
2330 list_del_init(&private->list);
2331 devm_kfree(&rot_mgr->pdev->dev, private);
2332 file->private_data = NULL;
2333 mutex_unlock(&rot_mgr->file_lock);
2334
2335 mdss_rotator_update_perf(rot_mgr);
2336 return 0;
2337}
2338
2339#ifdef CONFIG_COMPAT
2340static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
2341 struct mdss_rot_file_private *private, unsigned long arg)
2342{
2343 struct mdp_rotation_request32 user_req32;
2344 struct mdp_rotation_item *items = NULL;
2345 struct mdss_rot_entry_container *req = NULL;
2346 int size, ret;
2347 uint32_t req_count;
2348
2349 if (mdss_get_sd_client_cnt()) {
2350 pr_err("rot request not permitted during secure display session\n");
2351 return -EPERM;
2352 }
2353
2354 ret = copy_from_user(&user_req32, (void __user *)arg,
2355 sizeof(user_req32));
2356 if (ret) {
2357 pr_err("fail to copy rotation request\n");
2358 return ret;
2359 }
2360
2361 req_count = user_req32.count;
2362 if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
2363 pr_err("invalid rotator req count :%d\n", req_count);
2364 return -EINVAL;
2365 }
2366
2367 size = sizeof(struct mdp_rotation_item) * req_count;
2368 items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
2369 if (!items) {
2370 pr_err("fail to allocate rotation items\n");
2371 return -ENOMEM;
2372 }
2373 ret = copy_from_user(items, compat_ptr(user_req32.list), size);
2374 if (ret) {
2375 pr_err("fail to copy rotation items\n");
2376 goto handle_request32_err;
2377 }
2378
2379 req = mdss_rotator_req_init(mgr, items, user_req32.count,
2380 user_req32.flags);
2381 if (IS_ERR_OR_NULL(req)) {
2382 pr_err("fail to allocate rotation request\n");
2383 ret = PTR_ERR(req);
2384 goto handle_request32_err;
2385 }
2386
2387 mutex_lock(&mgr->lock);
2388
2389 if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
2390 ret = mdss_rotator_validate_request(mgr, private, req);
2391 goto handle_request32_err1;
2392 }
2393
2394 ret = mdss_rotator_handle_request_common(mgr, private, req, items);
2395 if (ret) {
2396 pr_err("fail to handle request\n");
2397 goto handle_request32_err1;
2398 }
2399
2400 ret = copy_to_user(compat_ptr(user_req32.list), items, size);
2401 if (ret) {
2402 pr_err("fail to copy output fence to user\n");
2403 mdss_rotator_remove_request(mgr, private, req);
2404 goto handle_request32_err1;
2405 }
2406
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302407 mdss_rotator_queue_request(mgr, private, req);
2408
2409 mutex_unlock(&mgr->lock);
2410
2411 devm_kfree(&mgr->pdev->dev, items);
2412 return ret;
2413
2414handle_request32_err1:
2415 mutex_unlock(&mgr->lock);
2416handle_request32_err:
2417 devm_kfree(&mgr->pdev->dev, items);
2418 devm_kfree(&mgr->pdev->dev, req);
2419 return ret;
2420}
2421
2422static unsigned int __do_compat_ioctl_rot(unsigned int cmd32)
2423{
2424 unsigned int cmd;
2425
2426 switch (cmd32) {
2427 case MDSS_ROTATION_REQUEST32:
2428 cmd = MDSS_ROTATION_REQUEST;
2429 break;
2430 case MDSS_ROTATION_OPEN32:
2431 cmd = MDSS_ROTATION_OPEN;
2432 break;
2433 case MDSS_ROTATION_CLOSE32:
2434 cmd = MDSS_ROTATION_CLOSE;
2435 break;
2436 case MDSS_ROTATION_CONFIG32:
2437 cmd = MDSS_ROTATION_CONFIG;
2438 break;
2439 default:
2440 cmd = cmd32;
2441 break;
2442 }
2443
2444 return cmd;
2445}
2446
2447static long mdss_rotator_compat_ioctl(struct file *file, unsigned int cmd,
2448 unsigned long arg)
2449{
2450 struct mdss_rot_file_private *private;
2451 int ret = -EINVAL;
2452
2453 if (!rot_mgr)
2454 return -ENODEV;
2455
2456 if (atomic_read(&rot_mgr->device_suspended))
2457 return -EPERM;
2458
2459 if (!file->private_data)
2460 return -EINVAL;
2461
2462 private = (struct mdss_rot_file_private *)file->private_data;
2463
2464 if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
2465 pr_err("Calling ioctl with unrecognized rot_file_private\n");
2466 return -EINVAL;
2467 }
2468
2469 cmd = __do_compat_ioctl_rot(cmd);
2470
2471 switch (cmd) {
2472 case MDSS_ROTATION_REQUEST:
2473 ATRACE_BEGIN("rotator_request32");
2474 ret = mdss_rotator_handle_request32(rot_mgr, private, arg);
2475 ATRACE_END("rotator_request32");
2476 break;
2477 case MDSS_ROTATION_OPEN:
2478 ret = mdss_rotator_open_session(rot_mgr, private, arg);
2479 break;
2480 case MDSS_ROTATION_CLOSE:
2481 ret = mdss_rotator_close_session(rot_mgr, private, arg);
2482 break;
2483 case MDSS_ROTATION_CONFIG:
2484 ret = mdss_rotator_config_session(rot_mgr, private, arg);
2485 break;
2486 default:
2487 pr_err("unexpected IOCTL %d\n", cmd);
2488 }
2489
2490 if (ret)
2491 pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
2492 return ret;
2493
2494}
2495#endif
2496
2497static long mdss_rotator_ioctl(struct file *file, unsigned int cmd,
2498 unsigned long arg)
2499{
2500 struct mdss_rot_file_private *private;
2501 int ret = -EINVAL;
2502
2503 if (!rot_mgr)
2504 return -ENODEV;
2505
2506 if (atomic_read(&rot_mgr->device_suspended))
2507 return -EPERM;
2508
2509 if (!file->private_data)
2510 return -EINVAL;
2511
2512 private = (struct mdss_rot_file_private *)file->private_data;
2513
2514 if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
2515 pr_err("Calling ioctl with unrecognized rot_file_private\n");
2516 return -EINVAL;
2517 }
2518
2519 switch (cmd) {
2520 case MDSS_ROTATION_REQUEST:
2521 ATRACE_BEGIN("rotator_request");
2522 ret = mdss_rotator_handle_request(rot_mgr, private, arg);
2523 ATRACE_END("rotator_request");
2524 break;
2525 case MDSS_ROTATION_OPEN:
2526 ret = mdss_rotator_open_session(rot_mgr, private, arg);
2527 break;
2528 case MDSS_ROTATION_CLOSE:
2529 ret = mdss_rotator_close_session(rot_mgr, private, arg);
2530 break;
2531 case MDSS_ROTATION_CONFIG:
2532 ret = mdss_rotator_config_session(rot_mgr, private, arg);
2533 break;
2534 default:
2535 pr_err("unexpected IOCTL %d\n", cmd);
2536 }
2537
2538 if (ret)
2539 pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
2540 return ret;
2541}
2542
2543static ssize_t mdss_rotator_show_capabilities(struct device *dev,
2544 struct device_attribute *attr, char *buf)
2545{
2546 size_t len = PAGE_SIZE;
2547 int cnt = 0;
2548
2549 if (!rot_mgr)
2550 return cnt;
2551
2552#define SPRINT(fmt, ...) \
2553 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2554
2555 SPRINT("wb_count=%d\n", rot_mgr->queue_count);
2556 SPRINT("downscale=%d\n", rot_mgr->has_downscale);
2557
2558 return cnt;
2559}
2560
/* Read-only sysfs attribute exposing rotator capabilities. */
static DEVICE_ATTR(caps, 0444, mdss_rotator_show_capabilities, NULL);

/* NULL-terminated attribute list for the sysfs group below. */
static struct attribute *mdss_rotator_fs_attrs[] = {
	&dev_attr_caps.attr,
	NULL
};

/* Group registered on the rotator device in probe(). */
static struct attribute_group mdss_rotator_fs_attr_group = {
	.attrs = mdss_rotator_fs_attrs
};

/* char-device entry points for /dev node created in probe(). */
static const struct file_operations mdss_rotator_fops = {
	.owner = THIS_MODULE,
	.open = mdss_rotator_open,
	.release = mdss_rotator_close,
	.unlocked_ioctl = mdss_rotator_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mdss_rotator_compat_ioctl,
#endif
};
2581
2582static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr,
2583 struct platform_device *dev)
2584{
2585 struct device_node *node;
2586 int ret = 0, i;
2587 bool register_bus_needed;
2588 int usecases;
2589
2590 mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
2591 if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
2592 ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
2593 if (!ret) {
2594 ret = -EINVAL;
2595 pr_err("msm_bus_cl_get_pdata failed. ret=%d\n", ret);
2596 mgr->data_bus.bus_scale_pdata = NULL;
2597 }
2598 }
2599
2600 register_bus_needed = of_property_read_bool(dev->dev.of_node,
2601 "qcom,mdss-has-reg-bus");
2602 if (register_bus_needed) {
2603 node = of_get_child_by_name(
2604 dev->dev.of_node, "qcom,mdss-rot-reg-bus");
2605 if (!node) {
2606 mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
2607 usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
2608 for (i = 0; i < usecases; i++) {
2609 rot_reg_bus_usecases[i].num_paths = 1;
2610 rot_reg_bus_usecases[i].vectors =
2611 &rot_reg_bus_vectors[i];
2612 }
2613 } else {
2614 mgr->reg_bus.bus_scale_pdata =
2615 msm_bus_pdata_from_node(dev, node);
2616 if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
2617 ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
2618 if (!ret)
2619 ret = -EINVAL;
2620 pr_err("reg_rot_bus failed rc=%d\n", ret);
2621 mgr->reg_bus.bus_scale_pdata = NULL;
2622 }
2623 }
2624 }
2625 return ret;
2626}
2627
2628static int mdss_rotator_parse_dt(struct mdss_rot_mgr *mgr,
2629 struct platform_device *dev)
2630{
2631 int ret = 0;
2632 u32 data;
2633
2634 ret = of_property_read_u32(dev->dev.of_node,
2635 "qcom,mdss-wb-count", &data);
2636 if (ret) {
2637 pr_err("Error in device tree\n");
2638 return ret;
2639 }
2640 if (data > ROT_MAX_HW_BLOCKS) {
2641 pr_err("Err, num of wb block (%d) larger than sw max %d\n",
2642 data, ROT_MAX_HW_BLOCKS);
2643 return -EINVAL;
2644 }
2645
2646 rot_mgr->queue_count = data;
2647 rot_mgr->has_downscale = of_property_read_bool(dev->dev.of_node,
2648 "qcom,mdss-has-downscale");
2649 rot_mgr->has_ubwc = of_property_read_bool(dev->dev.of_node,
2650 "qcom,mdss-has-ubwc");
2651
2652 ret = mdss_rotator_parse_dt_bus(mgr, dev);
2653 if (ret)
2654 pr_err("Failed to parse bus data\n");
2655
2656 return ret;
2657}
2658
/*
 * Undo mdss_rotator_get_dt_vreg_data(): deconfigure the regulators (the
 * get path calls msm_mdss_config_vreg() with 1, this passes 0) and free
 * the vreg table.
 */
static void mdss_rotator_put_dt_vreg_data(struct device *dev,
	struct mdss_module_power *mp)
{
	if (!mp) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	msm_mdss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
	if (mp->vreg_config) {
		devm_kfree(dev, mp->vreg_config);
		mp->vreg_config = NULL;
	}
	mp->num_vreg = 0;
}
2674
2675static int mdss_rotator_get_dt_vreg_data(struct device *dev,
Sachin Bhayare5076e252018-01-18 14:56:45 +05302676 struct mdss_module_power *mp)
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302677{
2678 const char *st = NULL;
2679 struct device_node *of_node = NULL;
2680 int dt_vreg_total = 0;
2681 int i;
2682 int rc;
2683
2684 if (!dev || !mp) {
2685 DEV_ERR("%s: invalid input\n", __func__);
2686 return -EINVAL;
2687 }
2688
2689 of_node = dev->of_node;
2690
2691 dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
2692 if (dt_vreg_total < 0) {
2693 DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
2694 dt_vreg_total);
2695 return 0;
2696 }
2697 mp->num_vreg = dt_vreg_total;
Sachin Bhayare5076e252018-01-18 14:56:45 +05302698 mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) *
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302699 dt_vreg_total, GFP_KERNEL);
2700 if (!mp->vreg_config) {
2701 DEV_ERR("%s: can't alloc vreg mem\n", __func__);
2702 return -ENOMEM;
2703 }
2704
2705 /* vreg-name */
2706 for (i = 0; i < dt_vreg_total; i++) {
2707 rc = of_property_read_string_index(of_node,
2708 "qcom,supply-names", i, &st);
2709 if (rc) {
2710 DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
2711 __func__, i, rc);
2712 goto error;
2713 }
2714 snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
2715 }
Sachin Bhayare5076e252018-01-18 14:56:45 +05302716 msm_mdss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302717
2718 for (i = 0; i < dt_vreg_total; i++) {
2719 DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
2720 __func__,
2721 mp->vreg_config[i].vreg_name,
2722 mp->vreg_config[i].min_voltage,
2723 mp->vreg_config[i].max_voltage,
2724 mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
2725 mp->vreg_config[i].load[DSS_REG_MODE_DISABLE]);
2726 }
2727 return rc;
2728
2729error:
2730 if (mp->vreg_config) {
2731 devm_kfree(dev, mp->vreg_config);
2732 mp->vreg_config = NULL;
2733 }
2734 mp->num_vreg = 0;
2735 return rc;
2736}
2737
/*
 * Release the data-bus and register-bus scale clients.  A zero handle means
 * the client was never registered (msm_bus_scale_register_client() failure
 * is detected as a zero handle in the register path), so it is skipped.
 */
static void mdss_rotator_bus_scale_unregister(struct mdss_rot_mgr *mgr)
{
	pr_debug("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
		mgr->data_bus.bus_hdl, mgr->reg_bus.bus_hdl);

	if (mgr->data_bus.bus_hdl)
		msm_bus_scale_unregister_client(mgr->data_bus.bus_hdl);

	if (mgr->reg_bus.bus_hdl)
		msm_bus_scale_unregister_client(mgr->reg_bus.bus_hdl);
}
2749
2750static int mdss_rotator_bus_scale_register(struct mdss_rot_mgr *mgr)
2751{
2752 if (!mgr->data_bus.bus_scale_pdata) {
2753 pr_err("Scale table is NULL\n");
2754 return -EINVAL;
2755 }
2756
2757 mgr->data_bus.bus_hdl =
2758 msm_bus_scale_register_client(
2759 mgr->data_bus.bus_scale_pdata);
2760 if (!mgr->data_bus.bus_hdl) {
2761 pr_err("bus_client register failed\n");
2762 return -EINVAL;
2763 }
2764 pr_debug("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
2765
2766 if (mgr->reg_bus.bus_scale_pdata) {
2767 mgr->reg_bus.bus_hdl =
2768 msm_bus_scale_register_client(
2769 mgr->reg_bus.bus_scale_pdata);
2770 if (!mgr->reg_bus.bus_hdl) {
2771 pr_err("register bus_client register failed\n");
2772 mdss_rotator_bus_scale_unregister(mgr);
2773 return -EINVAL;
2774 }
2775 pr_debug("registered register bus_hdl=%x\n",
2776 mgr->reg_bus.bus_hdl);
2777 }
2778
2779 return 0;
2780}
2781
2782static int mdss_rotator_clk_register(struct platform_device *pdev,
2783 struct mdss_rot_mgr *mgr, char *clk_name, u32 clk_idx)
2784{
2785 struct clk *tmp;
2786
2787 pr_debug("registered clk_reg\n");
2788
2789 if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
2790 pr_err("invalid clk index %d\n", clk_idx);
2791 return -EINVAL;
2792 }
2793
2794 if (mgr->rot_clk[clk_idx]) {
2795 pr_err("Stomping on clk prev registered:%d\n", clk_idx);
2796 return -EINVAL;
2797 }
2798
2799 tmp = devm_clk_get(&pdev->dev, clk_name);
2800 if (IS_ERR(tmp)) {
2801 pr_err("unable to get clk: %s\n", clk_name);
2802 return PTR_ERR(tmp);
2803 }
2804 mgr->rot_clk[clk_idx] = tmp;
2805 return 0;
2806}
2807
2808static int mdss_rotator_res_init(struct platform_device *pdev,
2809 struct mdss_rot_mgr *mgr)
2810{
2811 int ret;
2812
2813 ret = mdss_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
2814 if (ret)
2815 return ret;
2816
2817 ret = mdss_rotator_clk_register(pdev, mgr,
2818 "iface_clk", MDSS_CLK_ROTATOR_AHB);
2819 if (ret)
2820 goto error;
2821
2822 ret = mdss_rotator_clk_register(pdev, mgr,
2823 "rot_core_clk", MDSS_CLK_ROTATOR_CORE);
2824 if (ret)
2825 goto error;
2826
2827 ret = mdss_rotator_bus_scale_register(mgr);
2828 if (ret)
2829 goto error;
2830
2831 return 0;
2832error:
2833 mdss_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
2834 return ret;
2835}
2836
/*
 * Platform probe: allocate the global rotator manager, parse DT, set up
 * the work queues, create the char device (/dev) and sysfs nodes, and
 * acquire clocks/regulators/bus clients.  The error labels unwind in
 * strict reverse order of the corresponding setup steps.
 */
static int mdss_rotator_probe(struct platform_device *pdev)
{
	int ret;

	rot_mgr = devm_kzalloc(&pdev->dev, sizeof(struct mdss_rot_mgr),
		GFP_KERNEL);
	if (!rot_mgr)
		return -ENOMEM;

	rot_mgr->pdev = pdev;
	ret = mdss_rotator_parse_dt(rot_mgr, pdev);
	if (ret) {
		pr_err("fail to parse the dt\n");
		goto error_parse_dt;
	}

	mutex_init(&rot_mgr->lock);
	mutex_init(&rot_mgr->clk_lock);
	mutex_init(&rot_mgr->bus_lock);
	atomic_set(&rot_mgr->device_suspended, 0);
	ret = mdss_rotator_init_queue(rot_mgr);
	if (ret) {
		pr_err("fail to init queue\n");
		goto error_get_dev_num;
	}

	mutex_init(&rot_mgr->file_lock);
	INIT_LIST_HEAD(&rot_mgr->file_list);

	platform_set_drvdata(pdev, rot_mgr);

	/* One minor number for the single rotator char device. */
	ret = alloc_chrdev_region(&rot_mgr->dev_num, 0, 1, DRIVER_NAME);
	if (ret < 0) {
		pr_err("alloc_chrdev_region failed ret = %d\n", ret);
		goto error_get_dev_num;
	}

	rot_mgr->class = class_create(THIS_MODULE, CLASS_NAME);
	if (IS_ERR(rot_mgr->class)) {
		ret = PTR_ERR(rot_mgr->class);
		pr_err("couldn't create class rc = %d\n", ret);
		goto error_class_create;
	}

	rot_mgr->device = device_create(rot_mgr->class, NULL,
		rot_mgr->dev_num, NULL, DRIVER_NAME);
	if (IS_ERR(rot_mgr->device)) {
		ret = PTR_ERR(rot_mgr->device);
		pr_err("device_create failed %d\n", ret);
		goto error_class_device_create;
	}

	cdev_init(&rot_mgr->cdev, &mdss_rotator_fops);
	ret = cdev_add(&rot_mgr->cdev,
			MKDEV(MAJOR(rot_mgr->dev_num), 0), 1);
	if (ret < 0) {
		pr_err("cdev_add failed %d\n", ret);
		goto error_cdev_add;
	}

	/* sysfs failure is non-fatal by design: only a warning is logged. */
	ret = sysfs_create_group(&rot_mgr->device->kobj,
			&mdss_rotator_fs_attr_group);
	if (ret)
		pr_err("unable to register rotator sysfs nodes\n");

	ret = mdss_rotator_res_init(pdev, rot_mgr);
	if (ret < 0) {
		pr_err("res_init failed %d\n", ret);
		goto error_res_init;
	}
	return 0;

error_res_init:
	cdev_del(&rot_mgr->cdev);
error_cdev_add:
	device_destroy(rot_mgr->class, rot_mgr->dev_num);
error_class_device_create:
	class_destroy(rot_mgr->class);
error_class_create:
	unregister_chrdev_region(rot_mgr->dev_num, 1);
error_get_dev_num:
	mdss_rotator_deinit_queue(rot_mgr);
error_parse_dt:
	devm_kfree(&pdev->dev, rot_mgr);
	rot_mgr = NULL;
	return ret;
}
2924
2925static int mdss_rotator_remove(struct platform_device *dev)
2926{
2927 struct mdss_rot_mgr *mgr;
2928
2929 mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
2930 if (!mgr)
2931 return -ENODEV;
2932
2933 sysfs_remove_group(&rot_mgr->device->kobj, &mdss_rotator_fs_attr_group);
2934
2935 mdss_rotator_release_all(mgr);
2936
2937 mdss_rotator_put_dt_vreg_data(&dev->dev, &mgr->module_power);
2938 mdss_rotator_bus_scale_unregister(mgr);
2939 cdev_del(&rot_mgr->cdev);
2940 device_destroy(rot_mgr->class, rot_mgr->dev_num);
2941 class_destroy(rot_mgr->class);
2942 unregister_chrdev_region(rot_mgr->dev_num, 1);
2943
2944 mdss_rotator_deinit_queue(rot_mgr);
2945 devm_kfree(&dev->dev, rot_mgr);
2946 rot_mgr = NULL;
2947 return 0;
2948}
2949
2950static void mdss_rotator_suspend_cancel_rot_work(struct mdss_rot_mgr *mgr)
2951{
2952 struct mdss_rot_file_private *priv, *priv_next;
2953
2954 mutex_lock(&mgr->file_lock);
2955 list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
2956 mdss_rotator_cancel_all_requests(mgr, priv);
2957 }
2958 mutex_unlock(&rot_mgr->file_lock);
2959}
2960
2961#if defined(CONFIG_PM)
2962static int mdss_rotator_suspend(struct platform_device *dev, pm_message_t state)
2963{
2964 struct mdss_rot_mgr *mgr;
2965
2966 mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
2967 if (!mgr)
2968 return -ENODEV;
2969
2970 atomic_inc(&mgr->device_suspended);
2971 mdss_rotator_suspend_cancel_rot_work(mgr);
2972 mdss_rotator_update_perf(mgr);
2973 return 0;
2974}
2975
2976static int mdss_rotator_resume(struct platform_device *dev)
2977{
2978 struct mdss_rot_mgr *mgr;
2979
2980 mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
2981 if (!mgr)
2982 return -ENODEV;
2983
2984 atomic_dec(&mgr->device_suspended);
2985 mdss_rotator_update_perf(mgr);
2986 return 0;
2987}
2988#endif
2989
/* Device-tree match table for the rotator platform device. */
static const struct of_device_id mdss_rotator_dt_match[] = {
	{ .compatible = "qcom,mdss_rotator",},
	{}
};

MODULE_DEVICE_TABLE(of, mdss_rotator_dt_match);

/*
 * Platform driver definition.  Legacy suspend/resume callbacks are used
 * (dev_pm_ops is explicitly NULL), gated on CONFIG_PM.
 */
static struct platform_driver mdss_rotator_driver = {
	.probe = mdss_rotator_probe,
	.remove = mdss_rotator_remove,
#if defined(CONFIG_PM)
	.suspend = mdss_rotator_suspend,
	.resume = mdss_rotator_resume,
#endif
	.driver = {
		.name = "mdss_rotator",
		.of_match_table = mdss_rotator_dt_match,
		.pm = NULL,
	}
};
3010
/* Module entry point: register the rotator platform driver. */
static int __init mdss_rotator_init(void)
{
	return platform_driver_register(&mdss_rotator_driver);
}
3015
3016static void __exit mdss_rotator_exit(void)
3017{
3018 return platform_driver_unregister(&mdss_rotator_driver);
3019}
3020
/* Standard module hookup and metadata. */
module_init(mdss_rotator_init);
module_exit(mdss_rotator_exit);

MODULE_DESCRIPTION("MSM Rotator driver");
MODULE_LICENSE("GPL v2");