blob: ff4dbb718e53f289ca0cd06231974761cd4bdf10 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/sync.h>
21#include <linux/uaccess.h>
22#include <linux/of.h>
23#include <linux/clk.h>
24#include <linux/msm-bus.h>
25#include <linux/msm-bus-board.h>
26#include <linux/regulator/consumer.h>
27
28#include "mdss_rotator_internal.h"
29#include "mdss_mdp.h"
30#include "mdss_debug.h"
31
32/* waiting for hw time out, 3 vsync for 30fps*/
33#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
34
35/* acquire fence time out, following other driver fence time out practice */
36#define ROT_FENCE_WAIT_TIMEOUT MSEC_PER_SEC
37/*
38 * Max rotator hw blocks possible. Used for upper array limits instead of
39 * alloc and freeing small array
40 */
41#define ROT_MAX_HW_BLOCKS 2
42
43#define ROT_CHECK_BOUNDS(offset, size, max_size) \
44 (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
45
46#define CLASS_NAME "rotator"
47#define DRIVER_NAME "mdss_rotator"
48
49#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
50 { \
51 .src = MSM_BUS_MASTER_AMPSS_M0, \
52 .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
53 .ab = (ab_val), \
54 .ib = (ib_val), \
55 }
56
57#define BUS_VOTE_19_MHZ 153600000
58
59static struct msm_bus_vectors rot_reg_bus_vectors[] = {
60 MDP_REG_BUS_VECTOR_ENTRY(0, 0),
61 MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
62};
63static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
64 rot_reg_bus_vectors)];
65static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
66 .usecase = rot_reg_bus_usecases,
67 .num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
68 .name = "mdss_rot_reg",
69 .active_only = 1,
70};
71
72static struct mdss_rot_mgr *rot_mgr;
73static void mdss_rotator_wq_handler(struct work_struct *work);
74
/*
 * mdss_rotator_bus_scale_set_quota() - apply a data-bus bandwidth vote.
 * @bus:   rotator data-bus bookkeeping (handle, scale table, current vote).
 * @quota: requested bandwidth; 0 drops the vote (usecase index 0).
 *
 * Splits @quota evenly across all AXI ports of the chosen usecase and
 * issues the vote through the msm bus-scale client.
 *
 * Return: 0 on success or if the vote is unchanged, negative errno on error.
 */
static int mdss_rotator_bus_scale_set_quota(struct mdss_rot_bus_data_type *bus,
		u64 quota)
{
	int new_uc_idx;
	int ret;

	if (bus->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", bus->bus_hdl);
		return -EINVAL;
	}

	/* avoid a redundant bus-driver round trip if the vote is unchanged */
	if (bus->curr_quota_val == quota) {
		pr_debug("bw request already requested\n");
		return 0;
	}

	if (!quota) {
		/* usecase 0 is the zero-bandwidth "off" vector */
		new_uc_idx = 0;
	} else {
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			bus->bus_scale_pdata;
		u64 port_quota = quota;
		u32 total_axi_port_cnt;
		int i;

		/*
		 * Alternate among the non-zero usecase slots so the bus
		 * driver always sees an index change and re-evaluates the
		 * (mutated) vectors below.
		 */
		new_uc_idx = (bus->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
		if (total_axi_port_cnt == 0) {
			pr_err("Number of bw paths is 0\n");
			return -ENODEV;
		}
		/* split the quota evenly across all AXI ports */
		do_div(port_quota, total_axi_port_cnt);

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = port_quota;
			vect->ib = 0;
		}
	}
	bus->curr_bw_uc_idx = new_uc_idx;
	bus->curr_quota_val = quota;

	pr_debug("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
	MDSS_XLOG(new_uc_idx, ((quota >> 32) & 0xFFFFFFFF),
		(quota & 0xFFFFFFFF));
	ATRACE_BEGIN("msm_bus_scale_req_rot");
	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
		new_uc_idx);
	ATRACE_END("msm_bus_scale_req_rot");
	return ret;
}
129
130static int mdss_rotator_enable_reg_bus(struct mdss_rot_mgr *mgr, u64 quota)
131{
132 int ret = 0, changed = 0;
133 u32 usecase_ndx = 0;
134
135 if (!mgr || !mgr->reg_bus.bus_hdl)
136 return 0;
137
138 if (quota)
139 usecase_ndx = 1;
140
141 if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) {
142 mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
143 changed++;
144 }
145
146 pr_debug("%s, changed=%d register bus %s\n", __func__, changed,
147 quota ? "Enable":"Disable");
148
149 if (changed) {
150 ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
151 ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
152 usecase_ndx);
153 ATRACE_END("msm_bus_scale_req_rot_reg");
154 }
155
156 return ret;
157}
158
159/*
160 * Clock rate of all open sessions working a particular hw block
161 * are added together to get the required rate for that hw block.
162 * The max of each hw block becomes the final clock rate voted for
163 */
164static unsigned long mdss_rotator_clk_rate_calc(
165 struct mdss_rot_mgr *mgr,
166 struct mdss_rot_file_private *private)
167{
168 struct mdss_rot_perf *perf;
169 unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0};
170 unsigned long total_clk_rate = 0;
171 int i, wb_idx;
172
173 mutex_lock(&private->perf_lock);
174 list_for_each_entry(perf, &private->perf_list, list) {
175 bool rate_accounted_for = false;
176
177 mutex_lock(&perf->work_dis_lock);
178 /*
179 * If there is one session that has two work items across
180 * different hw blocks rate is accounted for in both blocks.
181 */
182 for (i = 0; i < mgr->queue_count; i++) {
183 if (perf->work_distribution[i]) {
184 clk_rate[i] += perf->clk_rate;
185 rate_accounted_for = true;
186 }
187 }
188
189 /*
190 * Sessions that are open but not distributed on any hw block
191 * Still need to be accounted for. Rate is added to last known
192 * wb idx.
193 */
194 wb_idx = perf->last_wb_idx;
195 if ((!rate_accounted_for) && (wb_idx >= 0) &&
196 (wb_idx < mgr->queue_count))
197 clk_rate[wb_idx] += perf->clk_rate;
198 mutex_unlock(&perf->work_dis_lock);
199 }
200 mutex_unlock(&private->perf_lock);
201
202 for (i = 0; i < mgr->queue_count; i++)
203 total_clk_rate = max(clk_rate[i], total_clk_rate);
204
205 pr_debug("Total clk rate calc=%lu\n", total_clk_rate);
206 return total_clk_rate;
207}
208
209static struct clk *mdss_rotator_get_clk(struct mdss_rot_mgr *mgr, u32 clk_idx)
210{
211 if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
212 pr_err("Invalid clk index:%u", clk_idx);
213 return NULL;
214 }
215
216 return mgr->rot_clk[clk_idx];
217}
218
/*
 * mdss_rotator_set_clk_rate() - round and program a rotator clock rate.
 * @mgr:     rotator manager.
 * @rate:    requested rate in Hz; rounded via clk_round_rate() first.
 * @clk_idx: which rotator clock to program (MDSS_CLK_ROTATOR_*).
 *
 * No-op (with an error log) if the clock was never set up. The rate is only
 * written when the rounded value differs from the current rate. Errors are
 * logged but not propagated (void return).
 */
static void mdss_rotator_set_clk_rate(struct mdss_rot_mgr *mgr,
		unsigned long rate, u32 clk_idx)
{
	unsigned long clk_rate;
	struct clk *clk = mdss_rotator_get_clk(mgr, clk_idx);
	int ret;

	if (clk) {
		/* clk_lock serializes rate changes with clk enable/disable */
		mutex_lock(&mgr->clk_lock);
		clk_rate = clk_round_rate(clk, rate);
		if (IS_ERR_VALUE(clk_rate)) {
			pr_err("unable to round rate err=%ld\n", clk_rate);
		} else if (clk_rate != clk_get_rate(clk)) {
			/* skip redundant clk_set_rate when already at rate */
			ret = clk_set_rate(clk, clk_rate);
			if (IS_ERR_VALUE((unsigned long)ret)) {
				pr_err("clk_set_rate failed, err:%d\n", ret);
			} else {
				pr_debug("rotator clk rate=%lu\n", clk_rate);
				MDSS_XLOG(clk_rate);
			}
		}
		mutex_unlock(&mgr->clk_lock);
	} else {
		pr_err("rotator clk not setup properly\n");
	}
}
245
246static void mdss_rotator_footswitch_ctrl(struct mdss_rot_mgr *mgr, bool on)
247{
248 int ret;
249
250 if (mgr->regulator_enable == on) {
251 pr_err("Regulators already in selected mode on=%d\n", on);
252 return;
253 }
254
255 pr_debug("%s: rotator regulators", on ? "Enable" : "Disable");
256 ret = msm_dss_enable_vreg(mgr->module_power.vreg_config,
257 mgr->module_power.num_vreg, on);
258 if (ret) {
259 pr_warn("Rotator regulator failed to %s\n",
260 on ? "enable" : "disable");
261 return;
262 }
263
264 mgr->regulator_enable = on;
265}
266
/*
 * mdss_rotator_clk_ctrl() - reference-counted enable/disable of rotator clocks.
 * @mgr:    rotator manager.
 * @enable: non-zero to take a clock reference, zero to drop one.
 *
 * Clocks are physically toggled only on the 0->1 and 1->0 refcount
 * transitions. On those transitions the data-bus vote context is also
 * switched: Active+Sleep while clocks run, Active-Only while idle.
 *
 * Return: 0 on success; on a clk_prepare_enable failure, the already-enabled
 * clocks are unwound and the error is returned.
 */
static int mdss_rotator_clk_ctrl(struct mdss_rot_mgr *mgr, int enable)
{
	struct clk *clk;
	int ret = 0;
	int i, changed = 0;

	mutex_lock(&mgr->clk_lock);
	if (enable) {
		if (mgr->rot_enable_clk_cnt == 0)
			changed++;
		mgr->rot_enable_clk_cnt++;
	} else {
		if (mgr->rot_enable_clk_cnt) {
			mgr->rot_enable_clk_cnt--;
			if (mgr->rot_enable_clk_cnt == 0)
				changed++;
		} else {
			/* unbalanced disable: log and ignore */
			pr_err("Can not be turned off\n");
		}
	}

	if (changed) {
		pr_debug("Rotator clk %s\n", enable ? "enable" : "disable");
		for (i = 0; i < MDSS_CLK_ROTATOR_END_IDX; i++) {
			clk = mgr->rot_clk[i];
			if (enable) {
				ret = clk_prepare_enable(clk);
				if (ret) {
					pr_err("enable failed clk_idx %d\n", i);
					goto error;
				}
			} else {
				clk_disable_unprepare(clk);
			}
		}
		/* bus_lock nests inside clk_lock on this path */
		mutex_lock(&mgr->bus_lock);
		if (enable) {
			/* Active+Sleep */
			msm_bus_scale_client_update_context(
				mgr->data_bus.bus_hdl, false,
				mgr->data_bus.curr_bw_uc_idx);
			trace_rotator_bw_ao_as_context(0);
		} else {
			/* Active Only */
			msm_bus_scale_client_update_context(
				mgr->data_bus.bus_hdl, true,
				mgr->data_bus.curr_bw_uc_idx);
			trace_rotator_bw_ao_as_context(1);
		}
		mutex_unlock(&mgr->bus_lock);
	}
	mutex_unlock(&mgr->clk_lock);

	return ret;
error:
	/* roll back the clocks enabled before the failing index */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(mgr->rot_clk[i]);
	mutex_unlock(&mgr->clk_lock);
	return ret;
}
327
328int mdss_rotator_resource_ctrl(struct mdss_rot_mgr *mgr, int enable)
329{
330 int changed = 0;
331 int ret = 0;
332
333 mutex_lock(&mgr->clk_lock);
334 if (enable) {
335 if (mgr->res_ref_cnt == 0)
336 changed++;
337 mgr->res_ref_cnt++;
338 } else {
339 if (mgr->res_ref_cnt) {
340 mgr->res_ref_cnt--;
341 if (mgr->res_ref_cnt == 0)
342 changed++;
343 } else {
344 pr_err("Rot resource already off\n");
345 }
346 }
347
348 pr_debug("%s: res_cnt=%d changed=%d enable=%d\n",
349 __func__, mgr->res_ref_cnt, changed, enable);
350 MDSS_XLOG(mgr->res_ref_cnt, changed, enable);
351
352 if (changed) {
353 if (enable)
354 mdss_rotator_footswitch_ctrl(mgr, true);
355 else
356 mdss_rotator_footswitch_ctrl(mgr, false);
357 }
358 mutex_unlock(&mgr->clk_lock);
359 return ret;
360}
361
362/* caller is expected to hold perf->work_dis_lock lock */
363static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
364 struct mdss_rot_perf *perf)
365{
366 int i;
367
368 for (i = 0; i < mgr->queue_count; i++) {
369 if (perf->work_distribution[i]) {
370 pr_debug("Work is still scheduled to complete\n");
371 return true;
372 }
373 }
374 return false;
375}
376
377static void mdss_rotator_install_fence_fd(struct mdss_rot_entry_container *req)
378{
379 int i = 0;
380
381 for (i = 0; i < req->count; i++)
382 sync_fence_install(req->entries[i].output_fence,
383 req->entries[i].output_fence_fd);
384}
385
386static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
387{
388 int ret = 0, fd;
389 u32 val;
390 struct sync_pt *sync_pt;
391 struct sync_fence *fence;
392 struct mdss_rot_timeline *rot_timeline;
393
394 if (!entry->queue)
395 return -EINVAL;
396
397 rot_timeline = &entry->queue->timeline;
398
399 mutex_lock(&rot_timeline->lock);
400 val = rot_timeline->next_value + 1;
401
402 sync_pt = sw_sync_pt_create(rot_timeline->timeline, val);
403 if (sync_pt == NULL) {
404 pr_err("cannot create sync point\n");
405 goto sync_pt_create_err;
406 }
407
408 /* create fence */
409 fence = sync_fence_create(rot_timeline->fence_name, sync_pt);
410 if (fence == NULL) {
411 pr_err("%s: cannot create fence\n", rot_timeline->fence_name);
412 sync_pt_free(sync_pt);
413 ret = -ENOMEM;
414 goto sync_pt_create_err;
415 }
416
417 fd = get_unused_fd_flags(0);
418 if (fd < 0) {
419 pr_err("get_unused_fd_flags failed error:0x%x\n", fd);
420 ret = fd;
421 goto get_fd_err;
422 }
423
424 rot_timeline->next_value++;
425 mutex_unlock(&rot_timeline->lock);
426
427 entry->output_fence_fd = fd;
428 entry->output_fence = fence;
429 pr_debug("output sync point created at val=%u\n", val);
430
431 return 0;
432
433get_fd_err:
434 sync_fence_put(fence);
435sync_pt_create_err:
436 mutex_unlock(&rot_timeline->lock);
437 return ret;
438}
439
/*
 * mdss_rotator_clear_fence() - drop an entry's input and output fences.
 * @entry: rotation work item being torn down.
 *
 * If the output fence was created but never handed to userspace, its fd is
 * returned to the pool and the timeline's next_value is rolled back so the
 * un-signaled point does not stall the timeline.
 */
static void mdss_rotator_clear_fence(struct mdss_rot_entry *entry)
{
	struct mdss_rot_timeline *rot_timeline;

	if (entry->input_fence) {
		sync_fence_put(entry->input_fence);
		entry->input_fence = NULL;
	}

	/* NOTE(review): entry->queue is dereferenced unconditionally here —
	 * presumably callers guarantee the queue is still assigned; verify.
	 */
	rot_timeline = &entry->queue->timeline;

	/* fence failed to copy to user space */
	if (entry->output_fence) {
		sync_fence_put(entry->output_fence);
		entry->output_fence = NULL;
		put_unused_fd(entry->output_fence_fd);

		mutex_lock(&rot_timeline->lock);
		rot_timeline->next_value--;
		mutex_unlock(&rot_timeline->lock);
	}
}
462
463static int mdss_rotator_signal_output(struct mdss_rot_entry *entry)
464{
465 struct mdss_rot_timeline *rot_timeline;
466
467 if (!entry->queue)
468 return -EINVAL;
469
470 rot_timeline = &entry->queue->timeline;
471
472 if (entry->output_signaled) {
473 pr_debug("output already signaled\n");
474 return 0;
475 }
476
477 mutex_lock(&rot_timeline->lock);
478 sw_sync_timeline_inc(rot_timeline->timeline, 1);
479 mutex_unlock(&rot_timeline->lock);
480
481 entry->output_signaled = true;
482
483 return 0;
484}
485
486static int mdss_rotator_wait_for_input(struct mdss_rot_entry *entry)
487{
488 int ret;
489
490 if (!entry->input_fence) {
491 pr_debug("invalid input fence, no wait\n");
492 return 0;
493 }
494
495 ret = sync_fence_wait(entry->input_fence, ROT_FENCE_WAIT_TIMEOUT);
496 sync_fence_put(entry->input_fence);
497 entry->input_fence = NULL;
498 return ret;
499}
500
/*
 * mdss_rotator_import_buffer() - import a userspace layer buffer into an
 * mdss_mdp_data descriptor.
 * @buffer: userspace buffer description (per-plane fd + offset).
 * @data:   destination descriptor; marked READY on return.
 * @flags:  import flags (e.g. MDP_SECURE_OVERLAY_SESSION).
 * @dev:    device used for the import.
 * @input:  true for a source buffer (DMA_TO_DEVICE), false for destination
 *          (DMA_FROM_DEVICE).
 *
 * Return: result of mdss_mdp_data_get_and_validate_size(), or -EINVAL if
 * plane_count exceeds MAX_PLANES.
 */
static int mdss_rotator_import_buffer(struct mdp_layer_buffer *buffer,
	struct mdss_mdp_data *data, u32 flags, struct device *dev, bool input)
{
	int i, ret = 0;
	struct msmfb_data planes[MAX_PLANES];
	int dir = DMA_TO_DEVICE;

	if (!input)
		dir = DMA_FROM_DEVICE;

	memset(planes, 0, sizeof(planes));

	if (buffer->plane_count > MAX_PLANES) {
		pr_err("buffer plane_count exceeds MAX_PLANES limit:%d\n",
				buffer->plane_count);
		return -EINVAL;
	}

	for (i = 0; i < buffer->plane_count; i++) {
		planes[i].memory_id = buffer->planes[i].fd;
		planes[i].offset = buffer->planes[i].offset;
	}

	ret = mdss_mdp_data_get_and_validate_size(data, planes,
			buffer->plane_count, flags, dev, true, dir, buffer);
	/* NOTE(review): state/timestamp are set even when the import above
	 * failed — presumably harmless since callers free on error; verify.
	 */
	data->state = MDP_BUF_STATE_READY;
	data->last_alloc = local_clock();

	return ret;
}
531
/*
 * mdss_rotator_map_and_check_data() - IOMMU-map the entry's source and
 * destination buffers and validate their sizes against the configured
 * formats and dimensions.
 * @entry: rotation work item with imported src/dst buffers.
 *
 * Takes an iommu reference for the duration of the mapping/validation.
 * On error the buffers are left mapped; the caller releases them.
 *
 * Return: 0 on success, negative errno on map/format/size failure.
 */
static int mdss_rotator_map_and_check_data(struct mdss_rot_entry *entry)
{
	int ret;
	struct mdp_layer_buffer *input;
	struct mdp_layer_buffer *output;
	struct mdss_mdp_format_params *fmt;
	struct mdss_mdp_plane_sizes ps;
	bool rotation;

	input = &entry->item.input;
	output = &entry->item.output;

	/* 90-degree rotation swaps plane geometry for size calculation */
	rotation = (entry->item.flags & MDP_ROTATION_90) ? true : false;

	ATRACE_BEGIN(__func__);
	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		ATRACE_END(__func__);
		return ret;
	}

	/* if error during map, the caller will release the data */
	entry->src_buf.state = MDP_BUF_STATE_ACTIVE;
	ret = mdss_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE);
	if (ret) {
		pr_err("source buffer mapping failed ret:%d\n", ret);
		goto end;
	}

	entry->dst_buf.state = MDP_BUF_STATE_ACTIVE;
	ret = mdss_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE);
	if (ret) {
		pr_err("destination buffer mapping failed ret:%d\n", ret);
		goto end;
	}

	/* validate the source buffer against its format/plane sizes */
	fmt = mdss_mdp_get_format_params(input->format);
	if (!fmt) {
		pr_err("invalid input format:%d\n", input->format);
		ret = -EINVAL;
		goto end;
	}

	ret = mdss_mdp_get_plane_sizes(
			fmt, input->width, input->height, &ps, 0, rotation);
	if (ret) {
		pr_err("fail to get input plane size ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_data_check(&entry->src_buf, &ps, fmt);
	if (ret) {
		pr_err("fail to check input data ret=%d\n", ret);
		goto end;
	}

	/* validate the destination buffer the same way */
	fmt = mdss_mdp_get_format_params(output->format);
	if (!fmt) {
		pr_err("invalid output format:%d\n", output->format);
		ret = -EINVAL;
		goto end;
	}

	ret = mdss_mdp_get_plane_sizes(
			fmt, output->width, output->height, &ps, 0, rotation);
	if (ret) {
		pr_err("fail to get output plane size ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_data_check(&entry->dst_buf, &ps, fmt);
	if (ret) {
		pr_err("fail to check output data ret=%d\n", ret);
		goto end;
	}

end:
	/* drop the iommu reference taken above */
	mdss_iommu_ctrl(0);
	ATRACE_END(__func__);

	return ret;
}
614
615static struct mdss_rot_perf *__mdss_rotator_find_session(
616 struct mdss_rot_file_private *private,
617 u32 session_id)
618{
619 struct mdss_rot_perf *perf, *perf_next;
620 bool found = false;
621
622 list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
623 if (perf->config.session_id == session_id) {
624 found = true;
625 break;
626 }
627 }
628 if (!found)
629 perf = NULL;
630 return perf;
631}
632
633static struct mdss_rot_perf *mdss_rotator_find_session(
634 struct mdss_rot_file_private *private,
635 u32 session_id)
636{
637 struct mdss_rot_perf *perf;
638
639 mutex_lock(&private->perf_lock);
640 perf = __mdss_rotator_find_session(private, session_id);
641 mutex_unlock(&private->perf_lock);
642 return perf;
643}
644
645static void mdss_rotator_release_data(struct mdss_rot_entry *entry)
646{
647 struct mdss_mdp_data *src_buf = &entry->src_buf;
648 struct mdss_mdp_data *dst_buf = &entry->dst_buf;
649
650 mdss_mdp_data_free(src_buf, true, DMA_TO_DEVICE);
651 src_buf->last_freed = local_clock();
652 src_buf->state = MDP_BUF_STATE_UNUSED;
653
654 mdss_mdp_data_free(dst_buf, true, DMA_FROM_DEVICE);
655 dst_buf->last_freed = local_clock();
656 dst_buf->state = MDP_BUF_STATE_UNUSED;
657}
658
/*
 * mdss_rotator_import_data() - import both the input and output buffers of a
 * rotation work item.
 * @mgr:   rotator manager (supplies the importing device).
 * @entry: work item whose item.input/item.output describe the buffers.
 *
 * Secure sessions propagate MDP_SECURE_OVERLAY_SESSION to the import.
 * On failure the caller is responsible for releasing any buffer that was
 * already imported (via mdss_rotator_release_data()).
 *
 * Return: 0 on success, negative errno on import failure.
 */
static int mdss_rotator_import_data(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	int ret;
	struct mdp_layer_buffer *input;
	struct mdp_layer_buffer *output;
	u32 flag = 0;

	input = &entry->item.input;
	output = &entry->item.output;

	if (entry->item.flags & MDP_ROTATION_SECURE)
		flag = MDP_SECURE_OVERLAY_SESSION;

	ret = mdss_rotator_import_buffer(input, &entry->src_buf, flag,
			&mgr->pdev->dev, true);
	if (ret) {
		pr_err("fail to import input buffer\n");
		return ret;
	}

	/*
	 * driver assumes output buffer is ready to be written
	 * immediately
	 */
	ret = mdss_rotator_import_buffer(output, &entry->dst_buf, flag,
			&mgr->pdev->dev, false);
	if (ret) {
		pr_err("fail to import output buffer\n");
		return ret;
	}

	return ret;
}
693
694static struct mdss_rot_hw_resource *mdss_rotator_hw_alloc(
695 struct mdss_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
696{
697 struct mdss_rot_hw_resource *hw;
698 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
699 u32 pipe_ndx, offset = mdss_mdp_get_wb_ctl_support(mdata, true);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +0530700 int ret = 0;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530701
702 hw = devm_kzalloc(&mgr->pdev->dev, sizeof(struct mdss_rot_hw_resource),
703 GFP_KERNEL);
704 if (!hw)
705 return ERR_PTR(-ENOMEM);
706
707 hw->ctl = mdss_mdp_ctl_alloc(mdata, offset);
708 if (IS_ERR_OR_NULL(hw->ctl)) {
709 pr_err("unable to allocate ctl\n");
710 ret = -ENODEV;
711 goto error;
712 }
713
714 if (wb_id == MDSS_ROTATION_HW_ANY)
715 hw->wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, hw->ctl->num);
716 else
717 hw->wb = mdss_mdp_wb_assign(wb_id, hw->ctl->num);
718
719 if (IS_ERR_OR_NULL(hw->wb)) {
720 pr_err("unable to allocate wb\n");
721 ret = -ENODEV;
722 goto error;
723 }
724 hw->ctl->wb = hw->wb;
725 hw->mixer = mdss_mdp_mixer_assign(hw->wb->num, true, true);
726
727 if (IS_ERR_OR_NULL(hw->mixer)) {
728 pr_err("unable to allocate wb mixer\n");
729 ret = -ENODEV;
730 goto error;
731 }
732 hw->ctl->mixer_left = hw->mixer;
733 hw->mixer->ctl = hw->ctl;
734
735 hw->mixer->rotator_mode = true;
736
737 switch (hw->mixer->num) {
738 case MDSS_MDP_WB_LAYERMIXER0:
739 hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
740 break;
741 case MDSS_MDP_WB_LAYERMIXER1:
742 hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE;
743 break;
744 default:
745 pr_err("invalid layer mixer=%d\n", hw->mixer->num);
746 ret = -EINVAL;
747 goto error;
748 }
749
750 hw->ctl->ops.start_fnc = mdss_mdp_writeback_start;
751 hw->ctl->power_state = MDSS_PANEL_POWER_ON;
752 hw->ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
753
754
755 if (hw->ctl->ops.start_fnc)
756 ret = hw->ctl->ops.start_fnc(hw->ctl);
757
758 if (ret)
759 goto error;
760
761 if (pipe_id >= mdata->ndma_pipes)
762 goto error;
763
764 pipe_ndx = mdata->dma_pipes[pipe_id].ndx;
765 hw->pipe = mdss_mdp_pipe_assign(mdata, hw->mixer,
766 pipe_ndx, MDSS_MDP_PIPE_RECT0);
767 if (IS_ERR_OR_NULL(hw->pipe)) {
768 pr_err("dma pipe allocation failed\n");
769 ret = -ENODEV;
770 goto error;
771 }
772
773 hw->pipe->mixer_left = hw->mixer;
774 hw->pipe_id = hw->wb->num;
775 hw->wb_id = hw->wb->num;
776
777 return hw;
778error:
779 if (!IS_ERR_OR_NULL(hw->pipe))
780 mdss_mdp_pipe_destroy(hw->pipe);
781 if (!IS_ERR_OR_NULL(hw->ctl)) {
782 if (hw->ctl->ops.stop_fnc)
783 hw->ctl->ops.stop_fnc(hw->ctl, MDSS_PANEL_POWER_OFF);
784 mdss_mdp_ctl_free(hw->ctl);
785 }
786 devm_kfree(&mgr->pdev->dev, hw);
787
788 return ERR_PTR(ret);
789}
790
/*
 * mdss_rotator_free_hw() - tear down a rotator hw resource: destroy the DMA
 * pipe, stop and free the writeback ctl, and free the bookkeeping struct.
 * @mgr: rotator manager (owns the devm allocation).
 * @hw:  hw resource previously built by mdss_rotator_hw_alloc().
 */
static void mdss_rotator_free_hw(struct mdss_rot_mgr *mgr,
	struct mdss_rot_hw_resource *hw)
{
	struct mdss_mdp_mixer *mixer;
	struct mdss_mdp_ctl *ctl;

	/* cache the mixer before the pipe (which references it) is gone */
	mixer = hw->pipe->mixer_left;

	mdss_mdp_pipe_destroy(hw->pipe);

	ctl = mdss_mdp_ctl_mixer_switch(mixer->ctl,
			MDSS_MDP_WB_CTL_TYPE_BLOCK);
	if (ctl) {
		if (ctl->ops.stop_fnc)
			ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);
		mdss_mdp_ctl_free(ctl);
	}

	devm_kfree(&mgr->pdev->dev, hw);
}
811
812struct mdss_rot_hw_resource *mdss_rotator_get_hw_resource(
813 struct mdss_rot_queue *queue, struct mdss_rot_entry *entry)
814{
815 struct mdss_rot_hw_resource *hw = queue->hw;
816
817 if (!hw) {
818 pr_err("no hw in the queue\n");
819 return NULL;
820 }
821
822 mutex_lock(&queue->hw_lock);
823
824 if (hw->workload) {
825 hw = ERR_PTR(-EBUSY);
826 goto get_hw_resource_err;
827 }
828 hw->workload = entry;
829
830get_hw_resource_err:
831 mutex_unlock(&queue->hw_lock);
832 return hw;
833}
834
835static void mdss_rotator_put_hw_resource(struct mdss_rot_queue *queue,
836 struct mdss_rot_hw_resource *hw)
837{
838 mutex_lock(&queue->hw_lock);
839 hw->workload = NULL;
840 mutex_unlock(&queue->hw_lock);
841}
842
843/*
844 * caller will need to call mdss_rotator_deinit_queue when
845 * the function returns error
846 */
847static int mdss_rotator_init_queue(struct mdss_rot_mgr *mgr)
848{
849 int i, size, ret = 0;
850 char name[32];
851
852 size = sizeof(struct mdss_rot_queue) * mgr->queue_count;
853 mgr->queues = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
854 if (!mgr->queues)
855 return -ENOMEM;
856
857 for (i = 0; i < mgr->queue_count; i++) {
858 snprintf(name, sizeof(name), "rot_workq_%d", i);
859 pr_debug("work queue name=%s\n", name);
860 mgr->queues[i].rot_work_queue = alloc_ordered_workqueue("%s",
861 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
862 if (!mgr->queues[i].rot_work_queue) {
863 ret = -EPERM;
864 break;
865 }
866
867 snprintf(name, sizeof(name), "rot_timeline_%d", i);
868 pr_debug("timeline name=%s\n", name);
869 mgr->queues[i].timeline.timeline =
870 sw_sync_timeline_create(name);
871 if (!mgr->queues[i].timeline.timeline) {
872 ret = -EPERM;
873 break;
874 }
875
876 size = sizeof(mgr->queues[i].timeline.fence_name);
877 snprintf(mgr->queues[i].timeline.fence_name, size,
878 "rot_fence_%d", i);
879 mutex_init(&mgr->queues[i].timeline.lock);
880
881 mutex_init(&mgr->queues[i].hw_lock);
882 }
883
884 return ret;
885}
886
/*
 * mdss_rotator_deinit_queue() - undo mdss_rotator_init_queue(); safe to call
 * after a partial init (NULL workqueue/timeline entries are skipped).
 */
static void mdss_rotator_deinit_queue(struct mdss_rot_mgr *mgr)
{
	int i;

	if (!mgr->queues)
		return;

	for (i = 0; i < mgr->queue_count; i++) {
		if (mgr->queues[i].rot_work_queue)
			destroy_workqueue(mgr->queues[i].rot_work_queue);

		if (mgr->queues[i].timeline.timeline) {
			struct sync_timeline *obj;

			/* sw_sync stores the timeline as its base type */
			obj = (struct sync_timeline *)
				mgr->queues[i].timeline.timeline;
			sync_timeline_destroy(obj);
		}
	}
	devm_kfree(&mgr->pdev->dev, mgr->queues);
	mgr->queue_count = 0;
}
909
910/*
911 * mdss_rotator_assign_queue() - Function assign rotation work onto hw
912 * @mgr: Rotator manager.
913 * @entry: Contains details on rotator work item being requested
914 * @private: Private struct used for access rot session performance struct
915 *
916 * This Function allocates hw required to complete rotation work item
917 * requested.
918 *
919 * Caller is responsible for calling cleanup function if error is returned
920 */
921static int mdss_rotator_assign_queue(struct mdss_rot_mgr *mgr,
922 struct mdss_rot_entry *entry,
923 struct mdss_rot_file_private *private)
924{
925 struct mdss_rot_perf *perf;
926 struct mdss_rot_queue *queue;
927 struct mdss_rot_hw_resource *hw;
928 struct mdp_rotation_item *item = &entry->item;
929 u32 wb_idx = item->wb_idx;
930 u32 pipe_idx = item->pipe_idx;
931 int ret = 0;
932
933 /*
934 * todo: instead of always assign writeback block 0, we can
935 * apply some load balancing logic in the future
936 */
937 if (wb_idx == MDSS_ROTATION_HW_ANY) {
938 wb_idx = 0;
939 pipe_idx = 0;
940 }
941
942 if (wb_idx >= mgr->queue_count) {
943 pr_err("Invalid wb idx = %d\n", wb_idx);
944 return -EINVAL;
945 }
946
947 queue = mgr->queues + wb_idx;
948
949 mutex_lock(&queue->hw_lock);
950
951 if (!queue->hw) {
952 hw = mdss_rotator_hw_alloc(mgr, pipe_idx, wb_idx);
953 if (IS_ERR_OR_NULL(hw)) {
954 pr_err("fail to allocate hw\n");
955 ret = PTR_ERR(hw);
956 } else {
957 queue->hw = hw;
958 }
959 }
960
961 if (queue->hw) {
962 entry->queue = queue;
963 queue->hw->pending_count++;
964 }
965
966 mutex_unlock(&queue->hw_lock);
967
968 perf = mdss_rotator_find_session(private, item->session_id);
969 if (!perf) {
970 pr_err("Could not find session based on rotation work item\n");
971 return -EINVAL;
972 }
973
974 entry->perf = perf;
975 perf->last_wb_idx = wb_idx;
976
977 return ret;
978}
979
/*
 * mdss_rotator_unassign_queue() - detach an entry from its queue and drop the
 * hw pending count; the hw block is freed when the last entry detaches.
 * @mgr:   rotator manager.
 * @entry: entry previously attached by mdss_rotator_assign_queue().
 */
static void mdss_rotator_unassign_queue(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	struct mdss_rot_queue *queue = entry->queue;

	if (!queue)
		return;

	entry->queue = NULL;

	mutex_lock(&queue->hw_lock);

	if (!queue->hw) {
		pr_err("entry assigned a queue with no hw\n");
		mutex_unlock(&queue->hw_lock);
		return;
	}

	queue->hw->pending_count--;
	/* last user gone: release the lazily-allocated hw block */
	if (queue->hw->pending_count == 0) {
		mdss_rotator_free_hw(mgr, queue->hw);
		queue->hw = NULL;
	}

	mutex_unlock(&queue->hw_lock);
}
1006
/*
 * mdss_rotator_queue_request() - account each entry's work on its hw block,
 * re-vote the rotator clock rate, then queue the commit work items.
 * @mgr:     rotator manager.
 * @private: file-private state (session perf list).
 * @req:     container of entries, all already assigned to queues.
 */
static void mdss_rotator_queue_request(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private,
	struct mdss_rot_entry_container *req)
{
	struct mdss_rot_entry *entry;
	struct mdss_rot_queue *queue;
	unsigned long clk_rate;
	u32 wb_idx;
	int i;

	/* first pass: bump per-block work counters so the clk calc sees them */
	for (i = 0; i < req->count; i++) {
		entry = req->entries + i;
		queue = entry->queue;
		wb_idx = queue->hw->wb_id;
		mutex_lock(&entry->perf->work_dis_lock);
		entry->perf->work_distribution[wb_idx]++;
		mutex_unlock(&entry->perf->work_dis_lock);
		entry->work_assigned = true;
	}

	clk_rate = mdss_rotator_clk_rate_calc(mgr, private);
	mdss_rotator_set_clk_rate(mgr, clk_rate, MDSS_CLK_ROTATOR_CORE);

	/* second pass: hand the entries to their ordered workqueues */
	for (i = 0; i < req->count; i++) {
		entry = req->entries + i;
		queue = entry->queue;
		/* NOTE(review): output_fence is cleared just before queuing —
		 * presumably ownership passed to userspace earlier; verify.
		 */
		entry->output_fence = NULL;
		queue_work(queue->rot_work_queue, &entry->commit_work);
	}
}
1037
/*
 * mdss_rotator_calc_perf() - derive a session's clock rate and bandwidth
 * requirement from its configured formats, dimensions and frame rate.
 * @perf: session perf struct; clk_rate and bw are written on success.
 *
 * Overflow of the 32-bit intermediate products is checked explicitly before
 * each multiplication; do not reorder these computations.
 *
 * Return: 0 on success, -EINVAL on bad format or arithmetic overflow.
 */
static int mdss_rotator_calc_perf(struct mdss_rot_perf *perf)
{
	struct mdp_rotation_config *config = &perf->config;
	u32 read_bw, write_bw;
	struct mdss_mdp_format_params *in_fmt, *out_fmt;

	in_fmt = mdss_mdp_get_format_params(config->input.format);
	if (!in_fmt) {
		pr_err("invalid input format\n");
		return -EINVAL;
	}
	out_fmt = mdss_mdp_get_format_params(config->output.format);
	if (!out_fmt) {
		pr_err("invalid output format\n");
		return -EINVAL;
	}
	/* guard width*height against 32-bit overflow before multiplying */
	if (!config->input.width ||
		(0xffffffff/config->input.width < config->input.height))
		return -EINVAL;

	perf->clk_rate = config->input.width * config->input.height;

	/* guard (w*h)*frame_rate the same way */
	if (!perf->clk_rate ||
		(0xffffffff/perf->clk_rate < config->frame_rate))
		return -EINVAL;

	perf->clk_rate *= config->frame_rate;
	/* rotator processes 4 pixels per clock */
	perf->clk_rate /= 4;

	/* 4:2:0 data averages 1.5 bytes/pixel; otherwise use format bpp */
	read_bw = config->input.width * config->input.height *
		config->frame_rate;
	if (in_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
		read_bw = (read_bw * 3) / 2;
	else
		read_bw *= in_fmt->bpp;

	write_bw = config->output.width * config->output.height *
		config->frame_rate;
	if (out_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
		write_bw = (write_bw * 3) / 2;
	else
		write_bw *= out_fmt->bpp;

	/* scale down by the (de)compression ratio where applicable */
	read_bw = apply_comp_ratio_factor(read_bw, in_fmt,
			&config->input.comp_ratio);
	write_bw = apply_comp_ratio_factor(write_bw, out_fmt,
			&config->output.comp_ratio);

	perf->bw = read_bw + write_bw;
	return 0;
}
1090
/*
 * mdss_rotator_update_perf() - recompute the aggregate bandwidth across all
 * open files/sessions and re-issue the bus votes.
 * @mgr: rotator manager.
 *
 * While suspended, only the pending-close bandwidth is voted. Lock order is
 * file_lock -> perf_lock, then bus_lock separately.
 *
 * Return: always 0.
 */
static int mdss_rotator_update_perf(struct mdss_rot_mgr *mgr)
{
	struct mdss_rot_file_private *priv;
	struct mdss_rot_perf *perf;
	int not_in_suspend_mode;
	u64 total_bw = 0;

	ATRACE_BEGIN(__func__);

	not_in_suspend_mode = !atomic_read(&mgr->device_suspended);

	if (not_in_suspend_mode) {
		/* sum bw over every session of every open file */
		mutex_lock(&mgr->file_lock);
		list_for_each_entry(priv, &mgr->file_list, list) {
			mutex_lock(&priv->perf_lock);
			list_for_each_entry(perf, &priv->perf_list, list) {
				total_bw += perf->bw;
			}
			mutex_unlock(&priv->perf_lock);
		}
		mutex_unlock(&mgr->file_lock);
	}

	mutex_lock(&mgr->bus_lock);
	/* sessions closed with work still in flight keep their vote alive */
	total_bw += mgr->pending_close_bw_vote;
	mdss_rotator_enable_reg_bus(mgr, total_bw);
	mdss_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
	mutex_unlock(&mgr->bus_lock);

	ATRACE_END(__func__);
	return 0;
}
1123
/*
 * mdss_rotator_release_from_work_distribution() - drop an entry's work
 * accounting and, if its session was closed while this work was in flight,
 * finish the deferred session teardown (bw vote, resources, perf struct).
 * @mgr:   rotator manager.
 * @entry: entry whose work was counted via mdss_rotator_queue_request().
 *
 * Lock order: mgr->lock -> work_dis_lock, then bus_lock for the vote.
 */
static void mdss_rotator_release_from_work_distribution(
	struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	if (entry->work_assigned) {
		bool free_perf = false;
		u32 wb_idx = entry->queue->hw->wb_id;

		mutex_lock(&mgr->lock);
		mutex_lock(&entry->perf->work_dis_lock);
		if (entry->perf->work_distribution[wb_idx])
			entry->perf->work_distribution[wb_idx]--;

		/* empty list means the session was already removed by close */
		if (!entry->perf->work_distribution[wb_idx]
				&& list_empty(&entry->perf->list)) {
			/* close session has offloaded perf free to us */
			free_perf = true;
		}
		mutex_unlock(&entry->perf->work_dis_lock);
		entry->work_assigned = false;
		if (free_perf) {
			/* retire the bw vote the close path left pending */
			mutex_lock(&mgr->bus_lock);
			mgr->pending_close_bw_vote -= entry->perf->bw;
			mutex_unlock(&mgr->bus_lock);
			mdss_rotator_resource_ctrl(mgr, false);
			devm_kfree(&mgr->pdev->dev,
				entry->perf->work_distribution);
			devm_kfree(&mgr->pdev->dev, entry->perf);
			mdss_rotator_update_perf(mgr);
			mdss_rotator_clk_ctrl(mgr, false);
			entry->perf = NULL;
		}
		mutex_unlock(&mgr->lock);
	}
}
1159
/*
 * mdss_rotator_release_entry() - full teardown of a rotation entry.
 *
 * Order matters: work accounting is dropped first (it reads entry->queue),
 * then fences and buffers, and the queue/hw reference is released last.
 */
static void mdss_rotator_release_entry(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	mdss_rotator_release_from_work_distribution(mgr, entry);
	mdss_rotator_clear_fence(entry);
	mdss_rotator_release_data(entry);
	mdss_rotator_unassign_queue(mgr, entry);
}
1168
/*
 * mdss_rotator_config_dnsc_factor() - validate and record the downscale
 * factors implied by the entry's source and destination rectangles.
 * @mgr:   rotator manager (has_downscale capability flag).
 * @entry: work item; dnsc_factor_w/h are written (0 means no downscale).
 *
 * Constraints enforced: integer power-of-two factors up to 1/64, symmetric
 * in both axes, and no downscale combined with UBWC output.
 *
 * Return: 0 on success, -EINVAL on an unsupported ratio; on error both
 * factors are reset to 0.
 */
static int mdss_rotator_config_dnsc_factor(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h, bit;
	struct mdp_rotation_item *item = &entry->item;
	struct mdss_mdp_format_params *fmt;

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	/* compare against post-rotation destination dimensions */
	if (item->flags & MDP_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	if (!mgr->has_downscale &&
		(src_w != dst_w || src_h != dst_h)) {
		pr_err("rotator downscale not supported\n");
		ret = -EINVAL;
		goto dnsc_err;
	}

	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	if ((src_w != dst_w) || (src_h != dst_h)) {
		/* factors must divide evenly */
		if ((src_w % dst_w) || (src_h % dst_h)) {
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_w = src_w / dst_w;
		bit = fls(entry->dnsc_factor_w);
		/*
		 * New Chipsets supports downscale upto 1/64
		 * change the Bit check from 5 to 7 to support 1/64 down scale
		 */
		/* factor must be a single power of two (one set bit) */
		if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 7)) {
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		bit = fls(entry->dnsc_factor_h);
		if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 7)) {
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	fmt = mdss_mdp_get_format_params(item->output.format);
	if (mdss_mdp_is_ubwc_format(fmt) &&
		(entry->dnsc_factor_h || entry->dnsc_factor_w)) {
		pr_err("ubwc not supported with downscale %d\n",
			item->output.format);
		ret = -EINVAL;
	}

dnsc_err:

	/* Downscaler does not support asymmetrical dnsc */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h)
		ret = -EINVAL;

	if (ret) {
		pr_err("Invalid rotator downscale ratio %dx%d->%dx%d\n",
			src_w, src_h, dst_w, dst_h);
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
1243
1244static bool mdss_rotator_verify_format(struct mdss_rot_mgr *mgr,
1245 struct mdss_mdp_format_params *in_fmt,
1246 struct mdss_mdp_format_params *out_fmt, bool rotation)
1247{
1248 u8 in_v_subsample, in_h_subsample;
1249 u8 out_v_subsample, out_h_subsample;
1250
1251 if (!mgr->has_ubwc && (mdss_mdp_is_ubwc_format(in_fmt) ||
1252 mdss_mdp_is_ubwc_format(out_fmt))) {
1253 pr_err("Rotator doesn't allow ubwc\n");
1254 return -EINVAL;
1255 }
1256
1257 if (!(out_fmt->flag & VALID_ROT_WB_FORMAT)) {
1258 pr_err("Invalid output format\n");
1259 return false;
1260 }
1261
1262 if (in_fmt->is_yuv != out_fmt->is_yuv) {
1263 pr_err("Rotator does not support CSC\n");
1264 return false;
1265 }
1266
1267 /* Forcing same pixel depth */
1268 if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) {
1269 /* Exception is that RGB can drop alpha or add X */
1270 if (in_fmt->is_yuv || out_fmt->alpha_enable ||
1271 (in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) ||
1272 (in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) ||
1273 (in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) {
1274 pr_err("Bit format does not match\n");
1275 return false;
1276 }
1277 }
1278
1279 /* Need to make sure that sub-sampling persists through rotation */
1280 if (rotation) {
1281 mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
1282 &in_v_subsample, &in_h_subsample);
1283 mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
1284 &out_v_subsample, &out_h_subsample);
1285
1286 if ((in_v_subsample != out_h_subsample) ||
1287 (in_h_subsample != out_v_subsample)) {
1288 pr_err("Rotation has invalid subsampling\n");
1289 return false;
1290 }
1291 } else {
1292 if (in_fmt->chroma_sample != out_fmt->chroma_sample) {
1293 pr_err("Format subsampling mismatch\n");
1294 return false;
1295 }
1296 }
1297
1298 pr_debug("in_fmt=%0d, out_fmt=%d, has_ubwc=%d\n",
1299 in_fmt->format, out_fmt->format, mgr->has_ubwc);
1300 return true;
1301}
1302
1303static int mdss_rotator_verify_config(struct mdss_rot_mgr *mgr,
1304 struct mdp_rotation_config *config)
1305{
1306 struct mdss_mdp_format_params *in_fmt, *out_fmt;
1307 u8 in_v_subsample, in_h_subsample;
1308 u8 out_v_subsample, out_h_subsample;
1309 u32 input, output;
1310 bool rotation;
1311
1312 input = config->input.format;
1313 output = config->output.format;
1314 rotation = (config->flags & MDP_ROTATION_90) ? true : false;
1315
1316 in_fmt = mdss_mdp_get_format_params(input);
1317 if (!in_fmt) {
1318 pr_err("Unrecognized input format:%u\n", input);
1319 return -EINVAL;
1320 }
1321
1322 out_fmt = mdss_mdp_get_format_params(output);
1323 if (!out_fmt) {
1324 pr_err("Unrecognized output format:%u\n", output);
1325 return -EINVAL;
1326 }
1327
1328 mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
1329 &in_v_subsample, &in_h_subsample);
1330 mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
1331 &out_v_subsample, &out_h_subsample);
1332
1333 /* Dimension of image needs to be divisible by subsample rate */
1334 if ((config->input.height % in_v_subsample) ||
1335 (config->input.width % in_h_subsample)) {
1336 pr_err("In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
1337 config->input.width, config->input.height,
1338 in_v_subsample, in_h_subsample);
1339 return -EINVAL;
1340 }
1341
1342 if ((config->output.height % out_v_subsample) ||
1343 (config->output.width % out_h_subsample)) {
1344 pr_err("Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
1345 config->output.width, config->output.height,
1346 out_v_subsample, out_h_subsample);
1347 return -EINVAL;
1348 }
1349
1350 if (!mdss_rotator_verify_format(mgr, in_fmt,
1351 out_fmt, rotation)) {
1352 pr_err("Rot format pairing invalid, in_fmt:%d, out_fmt:%d\n",
1353 input, output);
1354 return -EINVAL;
1355 }
1356
1357 return 0;
1358}
1359
1360static int mdss_rotator_validate_item_matches_session(
1361 struct mdp_rotation_config *config, struct mdp_rotation_item *item)
1362{
1363 int ret;
1364
1365 ret = __compare_session_item_rect(&config->input,
1366 &item->src_rect, item->input.format, true);
1367 if (ret)
1368 return ret;
1369
1370 ret = __compare_session_item_rect(&config->output,
1371 &item->dst_rect, item->output.format, false);
1372 if (ret)
1373 return ret;
1374
1375 ret = __compare_session_rotations(config->flags, item->flags);
1376 if (ret)
1377 return ret;
1378
1379 return 0;
1380}
1381
1382static int mdss_rotator_validate_img_roi(struct mdp_rotation_item *item)
1383{
1384 struct mdss_mdp_format_params *fmt;
1385 uint32_t width, height;
1386 int ret = 0;
1387
1388 width = item->input.width;
1389 height = item->input.height;
1390 if (item->flags & MDP_ROTATION_DEINTERLACE) {
1391 width *= 2;
1392 height /= 2;
1393 }
1394
1395 /* Check roi bounds */
1396 if (ROT_CHECK_BOUNDS(item->src_rect.x, item->src_rect.w, width) ||
1397 ROT_CHECK_BOUNDS(item->src_rect.y, item->src_rect.h,
1398 height)) {
1399 pr_err("invalid src flag=%08x img wh=%dx%d rect=%d,%d,%d,%d\n",
1400 item->flags, width, height, item->src_rect.x,
1401 item->src_rect.y, item->src_rect.w, item->src_rect.h);
1402 return -EINVAL;
1403 }
1404 if (ROT_CHECK_BOUNDS(item->dst_rect.x, item->dst_rect.w,
1405 item->output.width) ||
1406 ROT_CHECK_BOUNDS(item->dst_rect.y, item->dst_rect.h,
1407 item->output.height)) {
1408 pr_err("invalid dst img wh=%dx%d rect=%d,%d,%d,%d\n",
1409 item->output.width, item->output.height,
1410 item->dst_rect.x, item->dst_rect.y, item->dst_rect.w,
1411 item->dst_rect.h);
1412 return -EINVAL;
1413 }
1414
1415 fmt = mdss_mdp_get_format_params(item->output.format);
1416 if (!fmt) {
1417 pr_err("invalid output format:%d\n", item->output.format);
1418 return -EINVAL;
1419 }
1420
1421 if (mdss_mdp_is_ubwc_format(fmt))
1422 ret = mdss_mdp_validate_offset_for_ubwc_format(fmt,
1423 item->dst_rect.x, item->dst_rect.y);
1424
1425 return ret;
1426}
1427
1428static int mdss_rotator_validate_fmt_and_item_flags(
1429 struct mdp_rotation_config *config, struct mdp_rotation_item *item)
1430{
1431 struct mdss_mdp_format_params *fmt;
1432
1433 fmt = mdss_mdp_get_format_params(item->input.format);
1434 if ((item->flags & MDP_ROTATION_DEINTERLACE) &&
1435 mdss_mdp_is_ubwc_format(fmt)) {
1436 pr_err("cannot perform mdp deinterlace on tiled formats\n");
1437 return -EINVAL;
1438 }
1439 return 0;
1440}
1441
1442static int mdss_rotator_validate_entry(struct mdss_rot_mgr *mgr,
1443 struct mdss_rot_file_private *private,
1444 struct mdss_rot_entry *entry)
1445{
1446 int ret;
1447 struct mdp_rotation_item *item;
1448 struct mdss_rot_perf *perf;
1449
1450 item = &entry->item;
1451
1452 if (item->wb_idx != item->pipe_idx) {
1453 pr_err("invalid writeback and pipe idx\n");
1454 return -EINVAL;
1455 }
1456
1457 if (item->wb_idx != MDSS_ROTATION_HW_ANY &&
1458 item->wb_idx > mgr->queue_count) {
1459 pr_err("invalid writeback idx\n");
1460 return -EINVAL;
1461 }
1462
1463 perf = mdss_rotator_find_session(private, item->session_id);
1464 if (!perf) {
1465 pr_err("Could not find session:%u\n", item->session_id);
1466 return -EINVAL;
1467 }
1468
1469 ret = mdss_rotator_validate_item_matches_session(&perf->config, item);
1470 if (ret) {
1471 pr_err("Work item does not match session:%u\n",
1472 item->session_id);
1473 return ret;
1474 }
1475
1476 ret = mdss_rotator_validate_img_roi(item);
1477 if (ret) {
1478 pr_err("Image roi is invalid\n");
1479 return ret;
1480 }
1481
1482 ret = mdss_rotator_validate_fmt_and_item_flags(&perf->config, item);
1483 if (ret)
1484 return ret;
1485
1486 ret = mdss_rotator_config_dnsc_factor(mgr, entry);
1487 if (ret) {
1488 pr_err("fail to configure downscale factor\n");
1489 return ret;
1490 }
1491 return ret;
1492}
1493
1494/*
1495 * Upon failure from the function, caller needs to make sure
1496 * to call mdss_rotator_remove_request to clean up resources.
1497 */
1498static int mdss_rotator_add_request(struct mdss_rot_mgr *mgr,
1499 struct mdss_rot_file_private *private,
1500 struct mdss_rot_entry_container *req)
1501{
1502 struct mdss_rot_entry *entry;
1503 struct mdp_rotation_item *item;
1504 u32 flag = 0;
1505 int i, ret;
1506
1507 for (i = 0; i < req->count; i++) {
1508 entry = req->entries + i;
1509 item = &entry->item;
1510
1511 if (item->flags & MDP_ROTATION_SECURE)
1512 flag = MDP_SECURE_OVERLAY_SESSION;
1513
1514 ret = mdss_rotator_validate_entry(mgr, private, entry);
1515 if (ret) {
1516 pr_err("fail to validate the entry\n");
1517 return ret;
1518 }
1519
1520 ret = mdss_rotator_import_data(mgr, entry);
1521 if (ret) {
1522 pr_err("fail to import the data\n");
1523 return ret;
1524 }
1525
1526 if (item->input.fence >= 0) {
1527 entry->input_fence =
1528 sync_fence_fdget(item->input.fence);
1529 if (!entry->input_fence) {
1530 pr_err("invalid input fence fd\n");
1531 return -EINVAL;
1532 }
1533 }
1534
1535 ret = mdss_rotator_assign_queue(mgr, entry, private);
1536 if (ret) {
1537 pr_err("fail to assign queue to entry\n");
1538 return ret;
1539 }
1540
1541 entry->request = req;
1542
1543 INIT_WORK(&entry->commit_work, mdss_rotator_wq_handler);
1544
1545 ret = mdss_rotator_create_fence(entry);
1546 if (ret) {
1547 pr_err("fail to create fence\n");
1548 return ret;
1549 }
1550 item->output.fence = entry->output_fence_fd;
1551
1552 pr_debug("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
1553 "dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
1554 item->src_rect.x, item->src_rect.y,
1555 item->src_rect.w, item->src_rect.h, item->input.format,
1556 item->dst_rect.x, item->dst_rect.y,
1557 item->dst_rect.w, item->dst_rect.h, item->output.format,
1558 item->session_id);
1559 }
1560
1561 mutex_lock(&private->req_lock);
1562 list_add(&req->list, &private->req_list);
1563 mutex_unlock(&private->req_lock);
1564
1565 return 0;
1566}
1567
1568static void mdss_rotator_remove_request(struct mdss_rot_mgr *mgr,
1569 struct mdss_rot_file_private *private,
1570 struct mdss_rot_entry_container *req)
1571{
1572 int i;
1573
1574 mutex_lock(&private->req_lock);
1575 for (i = 0; i < req->count; i++)
1576 mdss_rotator_release_entry(mgr, req->entries + i);
1577 list_del_init(&req->list);
1578 mutex_unlock(&private->req_lock);
1579}
1580
/*
 * mdss_rotator_cancel_request() - cancel and free a whole request
 * @mgr: rotator manager
 * @req: request container; freed before returning
 *
 * This function should be called with req_lock held.
 */
static void mdss_rotator_cancel_request(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry_container *req)
{
	struct mdss_rot_entry *entry;
	int i;

	/*
	 * To avoid signal the rotation entry output fence in the wrong
	 * order, all the entries in the same request needs to be cancelled
	 * first, before signaling the output fence.
	 */
	for (i = req->count - 1; i >= 0; i--) {
		entry = req->entries + i;
		cancel_work_sync(&entry->commit_work);
	}

	for (i = req->count - 1; i >= 0; i--) {
		entry = req->entries + i;
		mdss_rotator_signal_output(entry);
		mdss_rotator_release_entry(mgr, entry);
	}

	list_del_init(&req->list);
	devm_kfree(&mgr->pdev->dev, req);
}
1607
1608static void mdss_rotator_cancel_all_requests(struct mdss_rot_mgr *mgr,
1609 struct mdss_rot_file_private *private)
1610{
1611 struct mdss_rot_entry_container *req, *req_next;
1612
1613 pr_debug("Canceling all rotator requests\n");
1614
1615 mutex_lock(&private->req_lock);
1616 list_for_each_entry_safe(req, req_next, &private->req_list, list)
1617 mdss_rotator_cancel_request(mgr, req);
1618 mutex_unlock(&private->req_lock);
1619}
1620
1621static void mdss_rotator_free_competed_request(struct mdss_rot_mgr *mgr,
1622 struct mdss_rot_file_private *private)
1623{
1624 struct mdss_rot_entry_container *req, *req_next;
1625
1626 mutex_lock(&private->req_lock);
1627 list_for_each_entry_safe(req, req_next, &private->req_list, list) {
1628 if (atomic_read(&req->pending_count) == 0) {
1629 list_del_init(&req->list);
1630 devm_kfree(&mgr->pdev->dev, req);
1631 }
1632 }
1633 mutex_unlock(&private->req_lock);
1634}
1635
1636static void mdss_rotator_release_rotator_perf_session(
1637 struct mdss_rot_mgr *mgr,
1638 struct mdss_rot_file_private *private)
1639{
1640 struct mdss_rot_perf *perf, *perf_next;
1641
1642 pr_debug("Releasing all rotator request\n");
1643 mdss_rotator_cancel_all_requests(mgr, private);
1644
1645 mutex_lock(&private->perf_lock);
1646 list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
1647 list_del_init(&perf->list);
1648 devm_kfree(&mgr->pdev->dev, perf->work_distribution);
1649 devm_kfree(&mgr->pdev->dev, perf);
1650 }
1651 mutex_unlock(&private->perf_lock);
1652}
1653
1654static void mdss_rotator_release_all(struct mdss_rot_mgr *mgr)
1655{
1656 struct mdss_rot_file_private *priv, *priv_next;
1657
1658 mutex_lock(&mgr->file_lock);
1659 list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
1660 mdss_rotator_release_rotator_perf_session(mgr, priv);
1661 mdss_rotator_resource_ctrl(mgr, false);
1662 list_del_init(&priv->list);
1663 priv->file->private_data = NULL;
1664 devm_kfree(&mgr->pdev->dev, priv);
1665 }
1666 mutex_unlock(&rot_mgr->file_lock);
1667
1668 mdss_rotator_update_perf(mgr);
1669}
1670
1671static int mdss_rotator_prepare_hw(struct mdss_rot_hw_resource *hw,
1672 struct mdss_rot_entry *entry)
1673{
1674 struct mdss_mdp_pipe *pipe;
1675 struct mdss_mdp_ctl *orig_ctl, *rot_ctl;
1676 int ret;
1677
1678 pipe = hw->pipe;
1679 orig_ctl = pipe->mixer_left->ctl;
1680 if (orig_ctl->shared_lock)
1681 mutex_lock(orig_ctl->shared_lock);
1682
1683 rot_ctl = mdss_mdp_ctl_mixer_switch(orig_ctl,
1684 MDSS_MDP_WB_CTL_TYPE_BLOCK);
1685 if (!rot_ctl) {
1686 ret = -EINVAL;
1687 goto error;
1688 } else {
1689 hw->ctl = rot_ctl;
1690 pipe->mixer_left = rot_ctl->mixer_left;
1691 }
1692
1693 return 0;
1694
1695error:
1696 if (orig_ctl->shared_lock)
1697 mutex_unlock(orig_ctl->shared_lock);
1698 return ret;
1699}
1700
1701static void mdss_rotator_translate_rect(struct mdss_rect *dst,
1702 struct mdp_rect *src)
1703{
1704 dst->x = src->x;
1705 dst->y = src->y;
1706 dst->w = src->w;
1707 dst->h = src->h;
1708}
1709
1710static u32 mdss_rotator_translate_flags(u32 input)
1711{
1712 u32 output = 0;
1713
1714 if (input & MDP_ROTATION_NOP)
1715 output |= MDP_ROT_NOP;
1716 if (input & MDP_ROTATION_FLIP_LR)
1717 output |= MDP_FLIP_LR;
1718 if (input & MDP_ROTATION_FLIP_UD)
1719 output |= MDP_FLIP_UD;
1720 if (input & MDP_ROTATION_90)
1721 output |= MDP_ROT_90;
1722 if (input & MDP_ROTATION_DEINTERLACE)
1723 output |= MDP_DEINTERLACE;
1724 if (input & MDP_ROTATION_SECURE)
1725 output |= MDP_SECURE_OVERLAY_SESSION;
1726 if (input & MDP_ROTATION_BWC_EN)
1727 output |= MDP_BWC_EN;
1728
1729 return output;
1730}
1731
/*
 * mdss_rotator_config_hw() - program the source pipe for one rotation entry
 * @hw:    hw resource holding the pipe to program
 * @entry: entry providing geometry, format and perf parameters
 *
 * Translates the item's flags/format/rects onto the pipe, reserves SMP
 * blocks, sets up scaling and queues the source buffer on the pipe.
 *
 * Return: 0 on success, negative error code from the failing step.
 */
static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw,
	struct mdss_rot_entry *entry)
{
	struct mdss_mdp_pipe *pipe;
	struct mdp_rotation_item *item;
	struct mdss_rot_perf *perf;
	int ret;

	ATRACE_BEGIN(__func__);
	pipe = hw->pipe;
	item = &entry->item;
	perf = entry->perf;

	pipe->flags = mdss_rotator_translate_flags(item->flags);
	pipe->src_fmt = mdss_mdp_get_format_params(item->input.format);
	pipe->img_width = item->input.width;
	pipe->img_height = item->input.height;
	mdss_rotator_translate_rect(&pipe->src, &item->src_rect);
	/*
	 * NOTE(review): pipe->dst is deliberately fed src_rect as well —
	 * presumably the writeback stage applies the destination geometry;
	 * confirm before changing.
	 */
	mdss_rotator_translate_rect(&pipe->dst, &item->src_rect);
	pipe->scaler.enable = 0;
	pipe->frame_rate = perf->config.frame_rate;

	/* force the new parameters to be (re)applied on the next kickoff */
	pipe->params_changed++;

	mdss_mdp_smp_release(pipe);

	ret = mdss_mdp_smp_reserve(pipe);
	if (ret) {
		pr_err("unable to mdss_mdp_smp_reserve rot data\n");
		goto done;
	}

	ret = mdss_mdp_overlay_setup_scaling(pipe);
	if (ret) {
		pr_err("scaling setup failed %d\n", ret);
		goto done;
	}

	ret = mdss_mdp_pipe_queue_data(pipe, &entry->src_buf);
	pr_debug("Config pipe. src{%u,%u,%u,%u}f=%u\n"
		"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
		item->src_rect.x, item->src_rect.y,
		item->src_rect.w, item->src_rect.h, item->input.format,
		item->dst_rect.x, item->dst_rect.y,
		item->dst_rect.w, item->dst_rect.h, item->output.format,
		item->session_id);
	MDSS_XLOG(item->input.format, pipe->img_width, pipe->img_height,
		pipe->flags);
done:
	ATRACE_END(__func__);
	return ret;
}
1784
1785static int mdss_rotator_kickoff_entry(struct mdss_rot_hw_resource *hw,
1786 struct mdss_rot_entry *entry)
1787{
1788 int ret;
1789 struct mdss_mdp_writeback_arg wb_args = {
1790 .data = &entry->dst_buf,
1791 .priv_data = entry,
1792 };
1793
1794 ret = mdss_mdp_writeback_display_commit(hw->ctl, &wb_args);
1795 return ret;
1796}
1797
1798static int mdss_rotator_wait_for_entry(struct mdss_rot_hw_resource *hw,
1799 struct mdss_rot_entry *entry)
1800{
1801 int ret;
1802 struct mdss_mdp_ctl *ctl = hw->ctl;
1803
1804 ret = mdss_mdp_display_wait4comp(ctl);
1805 if (ctl->shared_lock)
1806 mutex_unlock(ctl->shared_lock);
1807 return ret;
1808}
1809
/*
 * mdss_rotator_commit_entry() - run one entry through the rotator hardware
 * @hw:    acquired hw resource (pipe + writeback ctl)
 * @entry: entry to commit
 *
 * Prepares the writeback ctl, programs the pipe, kicks off the commit and
 * waits for completion.
 *
 * NOTE(review): mdss_rotator_prepare_hw() returns with the ctl shared_lock
 * held on success, and it is only released inside
 * mdss_rotator_wait_for_entry(); if config or kickoff fails in between,
 * the lock appears to stay held — verify against the ctl locking rules.
 *
 * Return: 0 on success, negative error code from the failing stage.
 */
static int mdss_rotator_commit_entry(struct mdss_rot_hw_resource *hw,
	struct mdss_rot_entry *entry)
{
	int ret;

	ret = mdss_rotator_prepare_hw(hw, entry);
	if (ret) {
		pr_err("fail to prepare hw resource %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_config_hw(hw, entry);
	if (ret) {
		pr_err("fail to configure hw resource %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_kickoff_entry(hw, entry);
	if (ret) {
		pr_err("fail to do kickoff %d\n", ret);
		return ret;
	}

	ret = mdss_rotator_wait_for_entry(hw, entry);
	if (ret) {
		pr_err("fail to wait for completion %d\n", ret);
		return ret;
	}

	return ret;
}
1841
/*
 * mdss_rotator_handle_entry() - full processing of one rotation entry
 * @hw:    acquired hw resource
 * @entry: entry to process
 *
 * Waits on the input fence, maps and checks the buffers, then commits
 * the entry to hardware.
 *
 * Return: 0 on success, negative error code from the failing stage.
 */
static int mdss_rotator_handle_entry(struct mdss_rot_hw_resource *hw,
	struct mdss_rot_entry *entry)
{
	int rc;

	rc = mdss_rotator_wait_for_input(entry);
	if (rc) {
		pr_err("wait for input buffer failed %d\n", rc);
		goto out;
	}

	rc = mdss_rotator_map_and_check_data(entry);
	if (rc) {
		pr_err("fail to prepare input/output data %d\n", rc);
		goto out;
	}

	rc = mdss_rotator_commit_entry(hw, entry);
	if (rc)
		pr_err("rotator commit failed %d\n", rc);

out:
	return rc;
}
1865
/*
 * mdss_rotator_wq_handler() - workqueue handler executing one entry
 * @work: embedded commit_work of a struct mdss_rot_entry
 *
 * Acquires a hw resource from the entry's queue, runs the entry, and in
 * all cases signals the output fence, releases the entry and decrements
 * the request's pending count so completed requests can be reaped.
 */
static void mdss_rotator_wq_handler(struct work_struct *work)
{
	struct mdss_rot_entry *entry;
	struct mdss_rot_entry_container *request;
	struct mdss_rot_hw_resource *hw;
	int ret;

	entry = container_of(work, struct mdss_rot_entry, commit_work);
	request = entry->request;

	if (!request) {
		pr_err("fatal error, no request with entry\n");
		return;
	}

	hw = mdss_rotator_get_hw_resource(entry->queue, entry);
	if (!hw) {
		pr_err("no hw for the queue\n");
		goto get_hw_res_err;
	}

	ret = mdss_rotator_handle_entry(hw, entry);
	if (ret) {
		struct mdp_rotation_item *item = &entry->item;

		pr_err("Rot req fail. src{%u,%u,%u,%u}f=%u\n"
			"dst{%u,%u,%u,%u}f=%u session_id=%u, wbidx%d, pipe_id=%d\n",
			item->src_rect.x, item->src_rect.y,
			item->src_rect.w, item->src_rect.h, item->input.format,
			item->dst_rect.x, item->dst_rect.y,
			item->dst_rect.w, item->dst_rect.h, item->output.format,
			item->session_id, item->wb_idx, item->pipe_idx);
	}

	mdss_rotator_put_hw_resource(entry->queue, hw);

get_hw_res_err:
	/* always signal + release so waiters and bookkeeping make progress */
	mdss_rotator_signal_output(entry);
	mdss_rotator_release_entry(rot_mgr, entry);
	atomic_dec(&request->pending_count);
}
1907
1908static int mdss_rotator_validate_request(struct mdss_rot_mgr *mgr,
1909 struct mdss_rot_file_private *private,
1910 struct mdss_rot_entry_container *req)
1911{
1912 int i, ret = 0;
1913 struct mdss_rot_entry *entry;
1914
1915 for (i = 0; i < req->count; i++) {
1916 entry = req->entries + i;
1917 ret = mdss_rotator_validate_entry(mgr, private,
1918 entry);
1919 if (ret) {
1920 pr_err("fail to validate the entry\n");
1921 return ret;
1922 }
1923 }
1924
1925 return ret;
1926}
1927
1928static u32 mdss_rotator_generator_session_id(struct mdss_rot_mgr *mgr)
1929{
1930 u32 id;
1931
1932 mutex_lock(&mgr->lock);
1933 id = mgr->session_id_generator++;
1934 mutex_unlock(&mgr->lock);
1935 return id;
1936}
1937
1938static int mdss_rotator_open_session(struct mdss_rot_mgr *mgr,
1939 struct mdss_rot_file_private *private, unsigned long arg)
1940{
1941 struct mdp_rotation_config config;
1942 struct mdss_rot_perf *perf;
1943 int ret;
1944
1945 ret = copy_from_user(&config, (void __user *)arg, sizeof(config));
1946 if (ret) {
1947 pr_err("fail to copy session data\n");
1948 return ret;
1949 }
1950
1951 ret = mdss_rotator_verify_config(mgr, &config);
1952 if (ret) {
1953 pr_err("Rotator verify format failed\n");
1954 return ret;
1955 }
1956
1957 perf = devm_kzalloc(&mgr->pdev->dev, sizeof(*perf), GFP_KERNEL);
1958 if (!perf)
1959 return -ENOMEM;
1960
1961 ATRACE_BEGIN(__func__); /* Open session votes for bw */
1962 perf->work_distribution = devm_kzalloc(&mgr->pdev->dev,
1963 sizeof(u32) * mgr->queue_count, GFP_KERNEL);
1964 if (!perf->work_distribution) {
1965 ret = -ENOMEM;
1966 goto alloc_err;
1967 }
1968
1969 config.session_id = mdss_rotator_generator_session_id(mgr);
1970 perf->config = config;
1971 perf->last_wb_idx = -1;
1972 mutex_init(&perf->work_dis_lock);
1973
1974 INIT_LIST_HEAD(&perf->list);
1975
1976 ret = mdss_rotator_calc_perf(perf);
1977 if (ret) {
1978 pr_err("error setting the session%d\n", ret);
1979 goto copy_user_err;
1980 }
1981
1982 ret = copy_to_user((void *)arg, &config, sizeof(config));
1983 if (ret) {
1984 pr_err("fail to copy to user\n");
1985 goto copy_user_err;
1986 }
1987
1988 mutex_lock(&private->perf_lock);
1989 list_add(&perf->list, &private->perf_list);
1990 mutex_unlock(&private->perf_lock);
1991
1992 ret = mdss_rotator_resource_ctrl(mgr, true);
1993 if (ret) {
1994 pr_err("Failed to aqcuire rotator resources\n");
1995 goto resource_err;
1996 }
1997
1998 mdss_rotator_clk_ctrl(rot_mgr, true);
1999 ret = mdss_rotator_update_perf(mgr);
2000 if (ret) {
2001 pr_err("fail to open session, not enough clk/bw\n");
2002 goto perf_err;
2003 }
2004 pr_debug("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
2005 config.session_id, config.input.width, config.input.height,
2006 config.input.format, config.output.width, config.output.height,
2007 config.output.format);
2008
2009 goto done;
2010perf_err:
2011 mdss_rotator_clk_ctrl(rot_mgr, false);
2012 mdss_rotator_resource_ctrl(mgr, false);
2013resource_err:
2014 mutex_lock(&private->perf_lock);
2015 list_del_init(&perf->list);
2016 mutex_unlock(&private->perf_lock);
2017copy_user_err:
2018 devm_kfree(&mgr->pdev->dev, perf->work_distribution);
2019alloc_err:
2020 devm_kfree(&mgr->pdev->dev, perf);
2021done:
2022 ATRACE_END(__func__);
2023 return ret;
2024}
2025
2026static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr,
2027 struct mdss_rot_file_private *private, unsigned long arg)
2028{
2029 struct mdss_rot_perf *perf;
2030 bool offload_release_work = false;
2031 u32 id;
2032
2033 id = (u32)arg;
2034 mutex_lock(&mgr->lock);
2035 mutex_lock(&private->perf_lock);
2036 perf = __mdss_rotator_find_session(private, id);
2037 if (!perf) {
2038 mutex_unlock(&private->perf_lock);
2039 mutex_unlock(&mgr->lock);
2040 pr_err("Trying to close session that does not exist\n");
2041 return -EINVAL;
2042 }
2043
2044 ATRACE_BEGIN(__func__);
2045 mutex_lock(&perf->work_dis_lock);
2046 if (mdss_rotator_is_work_pending(mgr, perf)) {
2047 pr_debug("Work is still pending, offload free to wq\n");
2048 mutex_lock(&mgr->bus_lock);
2049 mgr->pending_close_bw_vote += perf->bw;
2050 mutex_unlock(&mgr->bus_lock);
2051 offload_release_work = true;
2052 }
2053 list_del_init(&perf->list);
2054 mutex_unlock(&perf->work_dis_lock);
2055 mutex_unlock(&private->perf_lock);
2056
2057 if (offload_release_work)
2058 goto done;
2059
2060 mdss_rotator_resource_ctrl(mgr, false);
2061 devm_kfree(&mgr->pdev->dev, perf->work_distribution);
2062 devm_kfree(&mgr->pdev->dev, perf);
2063 mdss_rotator_update_perf(mgr);
2064 mdss_rotator_clk_ctrl(rot_mgr, false);
2065done:
2066 pr_debug("Closed session id:%u", id);
2067 ATRACE_END(__func__);
2068 mutex_unlock(&mgr->lock);
2069 return 0;
2070}
2071
2072static int mdss_rotator_config_session(struct mdss_rot_mgr *mgr,
2073 struct mdss_rot_file_private *private, unsigned long arg)
2074{
2075 int ret = 0;
2076 struct mdss_rot_perf *perf;
2077 struct mdp_rotation_config config;
2078
2079 ret = copy_from_user(&config, (void __user *)arg,
2080 sizeof(config));
2081 if (ret) {
2082 pr_err("fail to copy session data\n");
2083 return ret;
2084 }
2085
2086 ret = mdss_rotator_verify_config(mgr, &config);
2087 if (ret) {
2088 pr_err("Rotator verify format failed\n");
2089 return ret;
2090 }
2091
2092 mutex_lock(&mgr->lock);
2093 perf = mdss_rotator_find_session(private, config.session_id);
2094 if (!perf) {
2095 pr_err("No session with id=%u could be found\n",
2096 config.session_id);
2097 mutex_unlock(&mgr->lock);
2098 return -EINVAL;
2099 }
2100
2101 ATRACE_BEGIN(__func__);
2102 mutex_lock(&private->perf_lock);
2103 perf->config = config;
2104 ret = mdss_rotator_calc_perf(perf);
2105 mutex_unlock(&private->perf_lock);
2106
2107 if (ret) {
2108 pr_err("error in configuring the session %d\n", ret);
2109 goto done;
2110 }
2111
2112 ret = mdss_rotator_update_perf(mgr);
2113
2114 pr_debug("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
2115 config.session_id, config.input.width, config.input.height,
2116 config.input.format, config.output.width, config.output.height,
2117 config.output.format);
2118done:
2119 ATRACE_END(__func__);
2120 mutex_unlock(&mgr->lock);
2121 return ret;
2122}
2123
2124struct mdss_rot_entry_container *mdss_rotator_req_init(
2125 struct mdss_rot_mgr *mgr, struct mdp_rotation_item *items,
2126 u32 count, u32 flags)
2127{
2128 struct mdss_rot_entry_container *req;
2129 int size, i;
2130
2131 /*
2132 * Check input and output plane_count from each given item
2133 * are within the MAX_PLANES limit
2134 */
2135 for (i = 0 ; i < count; i++) {
2136 if ((items[i].input.plane_count > MAX_PLANES) ||
2137 (items[i].output.plane_count > MAX_PLANES)) {
2138 pr_err("Input/Output plane_count exceeds MAX_PLANES limit, input:%d, output:%d\n",
2139 items[i].input.plane_count,
2140 items[i].output.plane_count);
2141 return ERR_PTR(-EINVAL);
2142 }
2143 }
2144
2145 size = sizeof(struct mdss_rot_entry_container);
2146 size += sizeof(struct mdss_rot_entry) * count;
2147 req = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
2148
2149 if (!req)
2150 return ERR_PTR(-ENOMEM);
2151
2152
2153 INIT_LIST_HEAD(&req->list);
2154 req->count = count;
2155 req->entries = (struct mdss_rot_entry *)
2156 ((void *)req + sizeof(struct mdss_rot_entry_container));
2157 req->flags = flags;
2158 atomic_set(&req->pending_count, count);
2159
2160 for (i = 0; i < count; i++)
2161 req->entries[i].item = items[i];
2162
2163 return req;
2164}
2165
2166static int mdss_rotator_handle_request_common(struct mdss_rot_mgr *mgr,
2167 struct mdss_rot_file_private *private,
2168 struct mdss_rot_entry_container *req,
2169 struct mdp_rotation_item *items)
2170{
2171 int i, ret;
2172
2173 mdss_rotator_free_competed_request(mgr, private);
2174
2175 ret = mdss_rotator_add_request(mgr, private, req);
2176 if (ret) {
2177 pr_err("fail to add rotation request\n");
2178 mdss_rotator_remove_request(mgr, private, req);
2179 return ret;
2180 }
2181
2182 for (i = 0; i < req->count; i++)
2183 items[i].output.fence =
2184 req->entries[i].item.output.fence;
2185
2186 return ret;
2187}
2188
2189static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
2190 struct mdss_rot_file_private *private, unsigned long arg)
2191{
2192 struct mdp_rotation_request user_req;
2193 struct mdp_rotation_item *items = NULL;
2194 struct mdss_rot_entry_container *req = NULL;
2195 int size, ret;
2196 uint32_t req_count;
2197 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2198
2199 if (mdata->handoff_pending) {
2200 pr_err("Rotator request failed. Handoff pending\n");
2201 return -EPERM;
2202 }
2203
2204 if (mdss_get_sd_client_cnt()) {
2205 pr_err("rot request not permitted during secure display session\n");
2206 return -EPERM;
2207 }
2208
2209 ret = copy_from_user(&user_req, (void __user *)arg,
2210 sizeof(user_req));
2211 if (ret) {
2212 pr_err("fail to copy rotation request\n");
2213 return ret;
2214 }
2215
2216 req_count = user_req.count;
2217 if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
2218 pr_err("invalid rotator req count :%d\n", req_count);
2219 return -EINVAL;
2220 }
2221
2222 /*
2223 * here, we make a copy of the items so that we can copy
2224 * all the output fences to the client in one call. Otherwise,
2225 * we will have to call multiple copy_to_user
2226 */
2227 size = sizeof(struct mdp_rotation_item) * req_count;
2228 items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
2229 if (!items) {
2230 pr_err("fail to allocate rotation items\n");
2231 return -ENOMEM;
2232 }
2233 ret = copy_from_user(items, user_req.list, size);
2234 if (ret) {
2235 pr_err("fail to copy rotation items\n");
2236 goto handle_request_err;
2237 }
2238
2239 req = mdss_rotator_req_init(mgr, items, user_req.count, user_req.flags);
2240 if (IS_ERR_OR_NULL(req)) {
2241 pr_err("fail to allocate rotation request\n");
2242 ret = PTR_ERR(req);
2243 goto handle_request_err;
2244 }
2245
2246 mutex_lock(&mgr->lock);
2247
2248 if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
2249 ret = mdss_rotator_validate_request(mgr, private, req);
2250 goto handle_request_err1;
2251 }
2252
2253 ret = mdss_rotator_handle_request_common(mgr, private, req, items);
2254 if (ret) {
2255 pr_err("fail to handle request\n");
2256 goto handle_request_err1;
2257 }
2258
2259 ret = copy_to_user(user_req.list, items, size);
2260 if (ret) {
2261 pr_err("fail to copy output fence to user\n");
2262 mdss_rotator_remove_request(mgr, private, req);
2263 goto handle_request_err1;
2264 }
2265
2266 mdss_rotator_install_fence_fd(req);
2267 mdss_rotator_queue_request(mgr, private, req);
2268
2269 mutex_unlock(&mgr->lock);
2270
2271 devm_kfree(&mgr->pdev->dev, items);
2272 return ret;
2273
2274handle_request_err1:
2275 mutex_unlock(&mgr->lock);
2276handle_request_err:
2277 devm_kfree(&mgr->pdev->dev, items);
2278 devm_kfree(&mgr->pdev->dev, req);
2279 return ret;
2280}
2281
2282static int mdss_rotator_open(struct inode *inode, struct file *file)
2283{
2284 struct mdss_rot_file_private *private;
2285
2286 if (!rot_mgr)
2287 return -ENODEV;
2288
2289 if (atomic_read(&rot_mgr->device_suspended))
2290 return -EPERM;
2291
2292 private = devm_kzalloc(&rot_mgr->pdev->dev, sizeof(*private),
2293 GFP_KERNEL);
2294 if (!private)
2295 return -ENOMEM;
2296
2297 mutex_init(&private->req_lock);
2298 mutex_init(&private->perf_lock);
2299 INIT_LIST_HEAD(&private->req_list);
2300 INIT_LIST_HEAD(&private->perf_list);
2301 INIT_LIST_HEAD(&private->list);
2302
2303 mutex_lock(&rot_mgr->file_lock);
2304 list_add(&private->list, &rot_mgr->file_list);
2305 file->private_data = private;
2306 private->file = file;
2307 mutex_unlock(&rot_mgr->file_lock);
2308
2309 return 0;
2310}
2311
2312static bool mdss_rotator_file_priv_allowed(struct mdss_rot_mgr *mgr,
2313 struct mdss_rot_file_private *priv)
2314{
2315 struct mdss_rot_file_private *_priv, *_priv_next;
2316 bool ret = false;
2317
2318 mutex_lock(&mgr->file_lock);
2319 list_for_each_entry_safe(_priv, _priv_next, &mgr->file_list, list) {
2320 if (_priv == priv) {
2321 ret = true;
2322 break;
2323 }
2324 }
2325 mutex_unlock(&mgr->file_lock);
2326 return ret;
2327}
2328
2329static int mdss_rotator_close(struct inode *inode, struct file *file)
2330{
2331 struct mdss_rot_file_private *private;
2332
2333 if (!rot_mgr)
2334 return -ENODEV;
2335
2336 if (!file->private_data)
2337 return -EINVAL;
2338
2339 private = (struct mdss_rot_file_private *)file->private_data;
2340
2341 if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
2342 pr_err("Calling close with unrecognized rot_file_private\n");
2343 return -EINVAL;
2344 }
2345
2346 mdss_rotator_release_rotator_perf_session(rot_mgr, private);
2347
2348 mutex_lock(&rot_mgr->file_lock);
2349 list_del_init(&private->list);
2350 devm_kfree(&rot_mgr->pdev->dev, private);
2351 file->private_data = NULL;
2352 mutex_unlock(&rot_mgr->file_lock);
2353
2354 mdss_rotator_update_perf(rot_mgr);
2355 return 0;
2356}
2357
2358#ifdef CONFIG_COMPAT
2359static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
2360 struct mdss_rot_file_private *private, unsigned long arg)
2361{
2362 struct mdp_rotation_request32 user_req32;
2363 struct mdp_rotation_item *items = NULL;
2364 struct mdss_rot_entry_container *req = NULL;
2365 int size, ret;
2366 uint32_t req_count;
2367
2368 if (mdss_get_sd_client_cnt()) {
2369 pr_err("rot request not permitted during secure display session\n");
2370 return -EPERM;
2371 }
2372
2373 ret = copy_from_user(&user_req32, (void __user *)arg,
2374 sizeof(user_req32));
2375 if (ret) {
2376 pr_err("fail to copy rotation request\n");
2377 return ret;
2378 }
2379
2380 req_count = user_req32.count;
2381 if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
2382 pr_err("invalid rotator req count :%d\n", req_count);
2383 return -EINVAL;
2384 }
2385
2386 size = sizeof(struct mdp_rotation_item) * req_count;
2387 items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
2388 if (!items) {
2389 pr_err("fail to allocate rotation items\n");
2390 return -ENOMEM;
2391 }
2392 ret = copy_from_user(items, compat_ptr(user_req32.list), size);
2393 if (ret) {
2394 pr_err("fail to copy rotation items\n");
2395 goto handle_request32_err;
2396 }
2397
2398 req = mdss_rotator_req_init(mgr, items, user_req32.count,
2399 user_req32.flags);
2400 if (IS_ERR_OR_NULL(req)) {
2401 pr_err("fail to allocate rotation request\n");
2402 ret = PTR_ERR(req);
2403 goto handle_request32_err;
2404 }
2405
2406 mutex_lock(&mgr->lock);
2407
2408 if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
2409 ret = mdss_rotator_validate_request(mgr, private, req);
2410 goto handle_request32_err1;
2411 }
2412
2413 ret = mdss_rotator_handle_request_common(mgr, private, req, items);
2414 if (ret) {
2415 pr_err("fail to handle request\n");
2416 goto handle_request32_err1;
2417 }
2418
2419 ret = copy_to_user(compat_ptr(user_req32.list), items, size);
2420 if (ret) {
2421 pr_err("fail to copy output fence to user\n");
2422 mdss_rotator_remove_request(mgr, private, req);
2423 goto handle_request32_err1;
2424 }
2425
2426 mdss_rotator_install_fence_fd(req);
2427 mdss_rotator_queue_request(mgr, private, req);
2428
2429 mutex_unlock(&mgr->lock);
2430
2431 devm_kfree(&mgr->pdev->dev, items);
2432 return ret;
2433
2434handle_request32_err1:
2435 mutex_unlock(&mgr->lock);
2436handle_request32_err:
2437 devm_kfree(&mgr->pdev->dev, items);
2438 devm_kfree(&mgr->pdev->dev, req);
2439 return ret;
2440}
2441
2442static unsigned int __do_compat_ioctl_rot(unsigned int cmd32)
2443{
2444 unsigned int cmd;
2445
2446 switch (cmd32) {
2447 case MDSS_ROTATION_REQUEST32:
2448 cmd = MDSS_ROTATION_REQUEST;
2449 break;
2450 case MDSS_ROTATION_OPEN32:
2451 cmd = MDSS_ROTATION_OPEN;
2452 break;
2453 case MDSS_ROTATION_CLOSE32:
2454 cmd = MDSS_ROTATION_CLOSE;
2455 break;
2456 case MDSS_ROTATION_CONFIG32:
2457 cmd = MDSS_ROTATION_CONFIG;
2458 break;
2459 default:
2460 cmd = cmd32;
2461 break;
2462 }
2463
2464 return cmd;
2465}
2466
2467static long mdss_rotator_compat_ioctl(struct file *file, unsigned int cmd,
2468 unsigned long arg)
2469{
2470 struct mdss_rot_file_private *private;
2471 int ret = -EINVAL;
2472
2473 if (!rot_mgr)
2474 return -ENODEV;
2475
2476 if (atomic_read(&rot_mgr->device_suspended))
2477 return -EPERM;
2478
2479 if (!file->private_data)
2480 return -EINVAL;
2481
2482 private = (struct mdss_rot_file_private *)file->private_data;
2483
2484 if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
2485 pr_err("Calling ioctl with unrecognized rot_file_private\n");
2486 return -EINVAL;
2487 }
2488
2489 cmd = __do_compat_ioctl_rot(cmd);
2490
2491 switch (cmd) {
2492 case MDSS_ROTATION_REQUEST:
2493 ATRACE_BEGIN("rotator_request32");
2494 ret = mdss_rotator_handle_request32(rot_mgr, private, arg);
2495 ATRACE_END("rotator_request32");
2496 break;
2497 case MDSS_ROTATION_OPEN:
2498 ret = mdss_rotator_open_session(rot_mgr, private, arg);
2499 break;
2500 case MDSS_ROTATION_CLOSE:
2501 ret = mdss_rotator_close_session(rot_mgr, private, arg);
2502 break;
2503 case MDSS_ROTATION_CONFIG:
2504 ret = mdss_rotator_config_session(rot_mgr, private, arg);
2505 break;
2506 default:
2507 pr_err("unexpected IOCTL %d\n", cmd);
2508 }
2509
2510 if (ret)
2511 pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
2512 return ret;
2513
2514}
2515#endif
2516
2517static long mdss_rotator_ioctl(struct file *file, unsigned int cmd,
2518 unsigned long arg)
2519{
2520 struct mdss_rot_file_private *private;
2521 int ret = -EINVAL;
2522
2523 if (!rot_mgr)
2524 return -ENODEV;
2525
2526 if (atomic_read(&rot_mgr->device_suspended))
2527 return -EPERM;
2528
2529 if (!file->private_data)
2530 return -EINVAL;
2531
2532 private = (struct mdss_rot_file_private *)file->private_data;
2533
2534 if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
2535 pr_err("Calling ioctl with unrecognized rot_file_private\n");
2536 return -EINVAL;
2537 }
2538
2539 switch (cmd) {
2540 case MDSS_ROTATION_REQUEST:
2541 ATRACE_BEGIN("rotator_request");
2542 ret = mdss_rotator_handle_request(rot_mgr, private, arg);
2543 ATRACE_END("rotator_request");
2544 break;
2545 case MDSS_ROTATION_OPEN:
2546 ret = mdss_rotator_open_session(rot_mgr, private, arg);
2547 break;
2548 case MDSS_ROTATION_CLOSE:
2549 ret = mdss_rotator_close_session(rot_mgr, private, arg);
2550 break;
2551 case MDSS_ROTATION_CONFIG:
2552 ret = mdss_rotator_config_session(rot_mgr, private, arg);
2553 break;
2554 default:
2555 pr_err("unexpected IOCTL %d\n", cmd);
2556 }
2557
2558 if (ret)
2559 pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
2560 return ret;
2561}
2562
2563static ssize_t mdss_rotator_show_capabilities(struct device *dev,
2564 struct device_attribute *attr, char *buf)
2565{
2566 size_t len = PAGE_SIZE;
2567 int cnt = 0;
2568
2569 if (!rot_mgr)
2570 return cnt;
2571
2572#define SPRINT(fmt, ...) \
2573 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2574
2575 SPRINT("wb_count=%d\n", rot_mgr->queue_count);
2576 SPRINT("downscale=%d\n", rot_mgr->has_downscale);
2577
2578 return cnt;
2579}
2580
/* Read-only sysfs attribute exposing rotator capabilities. */
static DEVICE_ATTR(caps, 0444, mdss_rotator_show_capabilities, NULL);

static struct attribute *mdss_rotator_fs_attrs[] = {
	&dev_attr_caps.attr,
	NULL
};

/* Attribute group registered on the rotator device in probe. */
static struct attribute_group mdss_rotator_fs_attr_group = {
	.attrs = mdss_rotator_fs_attrs
};

/* Character-device entry points for the rotator /dev node. */
static const struct file_operations mdss_rotator_fops = {
	.owner = THIS_MODULE,
	.open = mdss_rotator_open,
	.release = mdss_rotator_close,
	.unlocked_ioctl = mdss_rotator_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mdss_rotator_compat_ioctl,
#endif
};
2601
2602static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr,
2603 struct platform_device *dev)
2604{
2605 struct device_node *node;
2606 int ret = 0, i;
2607 bool register_bus_needed;
2608 int usecases;
2609
2610 mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
2611 if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
2612 ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
2613 if (!ret) {
2614 ret = -EINVAL;
2615 pr_err("msm_bus_cl_get_pdata failed. ret=%d\n", ret);
2616 mgr->data_bus.bus_scale_pdata = NULL;
2617 }
2618 }
2619
2620 register_bus_needed = of_property_read_bool(dev->dev.of_node,
2621 "qcom,mdss-has-reg-bus");
2622 if (register_bus_needed) {
2623 node = of_get_child_by_name(
2624 dev->dev.of_node, "qcom,mdss-rot-reg-bus");
2625 if (!node) {
2626 mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
2627 usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
2628 for (i = 0; i < usecases; i++) {
2629 rot_reg_bus_usecases[i].num_paths = 1;
2630 rot_reg_bus_usecases[i].vectors =
2631 &rot_reg_bus_vectors[i];
2632 }
2633 } else {
2634 mgr->reg_bus.bus_scale_pdata =
2635 msm_bus_pdata_from_node(dev, node);
2636 if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
2637 ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
2638 if (!ret)
2639 ret = -EINVAL;
2640 pr_err("reg_rot_bus failed rc=%d\n", ret);
2641 mgr->reg_bus.bus_scale_pdata = NULL;
2642 }
2643 }
2644 }
2645 return ret;
2646}
2647
2648static int mdss_rotator_parse_dt(struct mdss_rot_mgr *mgr,
2649 struct platform_device *dev)
2650{
2651 int ret = 0;
2652 u32 data;
2653
2654 ret = of_property_read_u32(dev->dev.of_node,
2655 "qcom,mdss-wb-count", &data);
2656 if (ret) {
2657 pr_err("Error in device tree\n");
2658 return ret;
2659 }
2660 if (data > ROT_MAX_HW_BLOCKS) {
2661 pr_err("Err, num of wb block (%d) larger than sw max %d\n",
2662 data, ROT_MAX_HW_BLOCKS);
2663 return -EINVAL;
2664 }
2665
2666 rot_mgr->queue_count = data;
2667 rot_mgr->has_downscale = of_property_read_bool(dev->dev.of_node,
2668 "qcom,mdss-has-downscale");
2669 rot_mgr->has_ubwc = of_property_read_bool(dev->dev.of_node,
2670 "qcom,mdss-has-ubwc");
2671
2672 ret = mdss_rotator_parse_dt_bus(mgr, dev);
2673 if (ret)
2674 pr_err("Failed to parse bus data\n");
2675
2676 return ret;
2677}
2678
2679static void mdss_rotator_put_dt_vreg_data(struct device *dev,
2680 struct dss_module_power *mp)
2681{
2682 if (!mp) {
2683 DEV_ERR("%s: invalid input\n", __func__);
2684 return;
2685 }
2686
2687 msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
2688 if (mp->vreg_config) {
2689 devm_kfree(dev, mp->vreg_config);
2690 mp->vreg_config = NULL;
2691 }
2692 mp->num_vreg = 0;
2693}
2694
2695static int mdss_rotator_get_dt_vreg_data(struct device *dev,
2696 struct dss_module_power *mp)
2697{
2698 const char *st = NULL;
2699 struct device_node *of_node = NULL;
2700 int dt_vreg_total = 0;
2701 int i;
2702 int rc;
2703
2704 if (!dev || !mp) {
2705 DEV_ERR("%s: invalid input\n", __func__);
2706 return -EINVAL;
2707 }
2708
2709 of_node = dev->of_node;
2710
2711 dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
2712 if (dt_vreg_total < 0) {
2713 DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
2714 dt_vreg_total);
2715 return 0;
2716 }
2717 mp->num_vreg = dt_vreg_total;
2718 mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
2719 dt_vreg_total, GFP_KERNEL);
2720 if (!mp->vreg_config) {
2721 DEV_ERR("%s: can't alloc vreg mem\n", __func__);
2722 return -ENOMEM;
2723 }
2724
2725 /* vreg-name */
2726 for (i = 0; i < dt_vreg_total; i++) {
2727 rc = of_property_read_string_index(of_node,
2728 "qcom,supply-names", i, &st);
2729 if (rc) {
2730 DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
2731 __func__, i, rc);
2732 goto error;
2733 }
2734 snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
2735 }
2736 msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
2737
2738 for (i = 0; i < dt_vreg_total; i++) {
2739 DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
2740 __func__,
2741 mp->vreg_config[i].vreg_name,
2742 mp->vreg_config[i].min_voltage,
2743 mp->vreg_config[i].max_voltage,
2744 mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
2745 mp->vreg_config[i].load[DSS_REG_MODE_DISABLE]);
2746 }
2747 return rc;
2748
2749error:
2750 if (mp->vreg_config) {
2751 devm_kfree(dev, mp->vreg_config);
2752 mp->vreg_config = NULL;
2753 }
2754 mp->num_vreg = 0;
2755 return rc;
2756}
2757
2758static void mdss_rotator_bus_scale_unregister(struct mdss_rot_mgr *mgr)
2759{
2760 pr_debug("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
2761 mgr->data_bus.bus_hdl, mgr->reg_bus.bus_hdl);
2762
2763 if (mgr->data_bus.bus_hdl)
2764 msm_bus_scale_unregister_client(mgr->data_bus.bus_hdl);
2765
2766 if (mgr->reg_bus.bus_hdl)
2767 msm_bus_scale_unregister_client(mgr->reg_bus.bus_hdl);
2768}
2769
2770static int mdss_rotator_bus_scale_register(struct mdss_rot_mgr *mgr)
2771{
2772 if (!mgr->data_bus.bus_scale_pdata) {
2773 pr_err("Scale table is NULL\n");
2774 return -EINVAL;
2775 }
2776
2777 mgr->data_bus.bus_hdl =
2778 msm_bus_scale_register_client(
2779 mgr->data_bus.bus_scale_pdata);
2780 if (!mgr->data_bus.bus_hdl) {
2781 pr_err("bus_client register failed\n");
2782 return -EINVAL;
2783 }
2784 pr_debug("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
2785
2786 if (mgr->reg_bus.bus_scale_pdata) {
2787 mgr->reg_bus.bus_hdl =
2788 msm_bus_scale_register_client(
2789 mgr->reg_bus.bus_scale_pdata);
2790 if (!mgr->reg_bus.bus_hdl) {
2791 pr_err("register bus_client register failed\n");
2792 mdss_rotator_bus_scale_unregister(mgr);
2793 return -EINVAL;
2794 }
2795 pr_debug("registered register bus_hdl=%x\n",
2796 mgr->reg_bus.bus_hdl);
2797 }
2798
2799 return 0;
2800}
2801
2802static int mdss_rotator_clk_register(struct platform_device *pdev,
2803 struct mdss_rot_mgr *mgr, char *clk_name, u32 clk_idx)
2804{
2805 struct clk *tmp;
2806
2807 pr_debug("registered clk_reg\n");
2808
2809 if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
2810 pr_err("invalid clk index %d\n", clk_idx);
2811 return -EINVAL;
2812 }
2813
2814 if (mgr->rot_clk[clk_idx]) {
2815 pr_err("Stomping on clk prev registered:%d\n", clk_idx);
2816 return -EINVAL;
2817 }
2818
2819 tmp = devm_clk_get(&pdev->dev, clk_name);
2820 if (IS_ERR(tmp)) {
2821 pr_err("unable to get clk: %s\n", clk_name);
2822 return PTR_ERR(tmp);
2823 }
2824 mgr->rot_clk[clk_idx] = tmp;
2825 return 0;
2826}
2827
2828static int mdss_rotator_res_init(struct platform_device *pdev,
2829 struct mdss_rot_mgr *mgr)
2830{
2831 int ret;
2832
2833 ret = mdss_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
2834 if (ret)
2835 return ret;
2836
2837 ret = mdss_rotator_clk_register(pdev, mgr,
2838 "iface_clk", MDSS_CLK_ROTATOR_AHB);
2839 if (ret)
2840 goto error;
2841
2842 ret = mdss_rotator_clk_register(pdev, mgr,
2843 "rot_core_clk", MDSS_CLK_ROTATOR_CORE);
2844 if (ret)
2845 goto error;
2846
2847 ret = mdss_rotator_bus_scale_register(mgr);
2848 if (ret)
2849 goto error;
2850
2851 return 0;
2852error:
2853 mdss_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
2854 return ret;
2855}
2856
/*
 * Platform probe: allocate the global rotator manager, parse DT, set up
 * the work queues, create the char device + class + sysfs node, and
 * finally acquire clocks/regulators/bus clients. Error labels unwind in
 * strict reverse order of the setup steps.
 */
static int mdss_rotator_probe(struct platform_device *pdev)
{
	int ret;

	rot_mgr = devm_kzalloc(&pdev->dev, sizeof(struct mdss_rot_mgr),
		GFP_KERNEL);
	if (!rot_mgr)
		return -ENOMEM;

	rot_mgr->pdev = pdev;
	ret = mdss_rotator_parse_dt(rot_mgr, pdev);
	if (ret) {
		pr_err("fail to parse the dt\n");
		goto error_parse_dt;
	}

	mutex_init(&rot_mgr->lock);
	mutex_init(&rot_mgr->clk_lock);
	mutex_init(&rot_mgr->bus_lock);
	atomic_set(&rot_mgr->device_suspended, 0);
	ret = mdss_rotator_init_queue(rot_mgr);
	if (ret) {
		pr_err("fail to init queue\n");
		goto error_get_dev_num;
	}

	mutex_init(&rot_mgr->file_lock);
	INIT_LIST_HEAD(&rot_mgr->file_list);

	platform_set_drvdata(pdev, rot_mgr);

	/* char device: region -> class -> device node -> cdev */
	ret = alloc_chrdev_region(&rot_mgr->dev_num, 0, 1, DRIVER_NAME);
	if (ret < 0) {
		pr_err("alloc_chrdev_region failed ret = %d\n", ret);
		goto error_get_dev_num;
	}

	rot_mgr->class = class_create(THIS_MODULE, CLASS_NAME);
	if (IS_ERR(rot_mgr->class)) {
		ret = PTR_ERR(rot_mgr->class);
		pr_err("couldn't create class rc = %d\n", ret);
		goto error_class_create;
	}

	rot_mgr->device = device_create(rot_mgr->class, NULL,
		rot_mgr->dev_num, NULL, DRIVER_NAME);
	if (IS_ERR(rot_mgr->device)) {
		ret = PTR_ERR(rot_mgr->device);
		pr_err("device_create failed %d\n", ret);
		goto error_class_device_create;
	}

	cdev_init(&rot_mgr->cdev, &mdss_rotator_fops);
	ret = cdev_add(&rot_mgr->cdev,
			MKDEV(MAJOR(rot_mgr->dev_num), 0), 1);
	if (ret < 0) {
		pr_err("cdev_add failed %d\n", ret);
		goto error_cdev_add;
	}

	/* sysfs failure is deliberately non-fatal: only logged */
	ret = sysfs_create_group(&rot_mgr->device->kobj,
			&mdss_rotator_fs_attr_group);
	if (ret)
		pr_err("unable to register rotator sysfs nodes\n");

	ret = mdss_rotator_res_init(pdev, rot_mgr);
	if (ret < 0) {
		pr_err("res_init failed %d\n", ret);
		goto error_res_init;
	}
	return 0;

error_res_init:
	cdev_del(&rot_mgr->cdev);
error_cdev_add:
	device_destroy(rot_mgr->class, rot_mgr->dev_num);
error_class_device_create:
	class_destroy(rot_mgr->class);
error_class_create:
	unregister_chrdev_region(rot_mgr->dev_num, 1);
error_get_dev_num:
	mdss_rotator_deinit_queue(rot_mgr);
error_parse_dt:
	devm_kfree(&pdev->dev, rot_mgr);
	rot_mgr = NULL;
	return ret;
}
2944
2945static int mdss_rotator_remove(struct platform_device *dev)
2946{
2947 struct mdss_rot_mgr *mgr;
2948
2949 mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
2950 if (!mgr)
2951 return -ENODEV;
2952
2953 sysfs_remove_group(&rot_mgr->device->kobj, &mdss_rotator_fs_attr_group);
2954
2955 mdss_rotator_release_all(mgr);
2956
2957 mdss_rotator_put_dt_vreg_data(&dev->dev, &mgr->module_power);
2958 mdss_rotator_bus_scale_unregister(mgr);
2959 cdev_del(&rot_mgr->cdev);
2960 device_destroy(rot_mgr->class, rot_mgr->dev_num);
2961 class_destroy(rot_mgr->class);
2962 unregister_chrdev_region(rot_mgr->dev_num, 1);
2963
2964 mdss_rotator_deinit_queue(rot_mgr);
2965 devm_kfree(&dev->dev, rot_mgr);
2966 rot_mgr = NULL;
2967 return 0;
2968}
2969
2970static void mdss_rotator_suspend_cancel_rot_work(struct mdss_rot_mgr *mgr)
2971{
2972 struct mdss_rot_file_private *priv, *priv_next;
2973
2974 mutex_lock(&mgr->file_lock);
2975 list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
2976 mdss_rotator_cancel_all_requests(mgr, priv);
2977 }
2978 mutex_unlock(&rot_mgr->file_lock);
2979}
2980
2981#if defined(CONFIG_PM)
2982static int mdss_rotator_suspend(struct platform_device *dev, pm_message_t state)
2983{
2984 struct mdss_rot_mgr *mgr;
2985
2986 mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
2987 if (!mgr)
2988 return -ENODEV;
2989
2990 atomic_inc(&mgr->device_suspended);
2991 mdss_rotator_suspend_cancel_rot_work(mgr);
2992 mdss_rotator_update_perf(mgr);
2993 return 0;
2994}
2995
2996static int mdss_rotator_resume(struct platform_device *dev)
2997{
2998 struct mdss_rot_mgr *mgr;
2999
3000 mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
3001 if (!mgr)
3002 return -ENODEV;
3003
3004 atomic_dec(&mgr->device_suspended);
3005 mdss_rotator_update_perf(mgr);
3006 return 0;
3007}
3008#endif
3009
/* DT compatible string matched against qcom,mdss_rotator nodes. */
static const struct of_device_id mdss_rotator_dt_match[] = {
	{ .compatible = "qcom,mdss_rotator",},
	{}
};

MODULE_DEVICE_TABLE(of, mdss_rotator_dt_match);

/* Legacy (non dev_pm_ops) suspend/resume callbacks; .pm left NULL. */
static struct platform_driver mdss_rotator_driver = {
	.probe = mdss_rotator_probe,
	.remove = mdss_rotator_remove,
#if defined(CONFIG_PM)
	.suspend = mdss_rotator_suspend,
	.resume = mdss_rotator_resume,
#endif
	.driver = {
		.name = "mdss_rotator",
		.of_match_table = mdss_rotator_dt_match,
		.pm = NULL,
	}
};
3030
/* Module init: register the platform driver; probe runs on DT match. */
static int __init mdss_rotator_init(void)
{
	return platform_driver_register(&mdss_rotator_driver);
}
3035
3036static void __exit mdss_rotator_exit(void)
3037{
3038 return platform_driver_unregister(&mdss_rotator_driver);
3039}
3040
module_init(mdss_rotator_init);
module_exit(mdss_rotator_exit);

MODULE_DESCRIPTION("MSM Rotator driver");
MODULE_LICENSE("GPL v2");