blob: e472e7fb5568b2ce6ad3c455ac31b3e396d641f5 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/*
2 * MDSS MDP Interface (used by framebuffer core)
3 *
4 * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
5 * Copyright (C) 2007 Google Incorporated
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) "%s: " fmt, __func__
18
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/hrtimer.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/iommu.h>
28#include <linux/iopoll.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31#include <linux/pm.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
34#include <linux/regulator/rpm-smd-regulator.h>
35#include <linux/module.h>
36#include <linux/mutex.h>
37#include <linux/sched.h>
38#include <linux/time.h>
39#include <linux/spinlock.h>
40#include <linux/semaphore.h>
41#include <linux/uaccess.h>
42#include <linux/clk/msm-clk.h>
43#include <linux/irqdomain.h>
44#include <linux/irq.h>
45
46#include <linux/msm-bus.h>
47#include <linux/msm-bus-board.h>
48#include <soc/qcom/scm.h>
49#include <soc/qcom/rpm-smd.h>
50
51#include "mdss.h"
52#include "mdss_fb.h"
53#include "mdss_mdp.h"
54#include "mdss_panel.h"
55#include "mdss_debug.h"
56#include "mdss_mdp_debug.h"
57#include "mdss_smmu.h"
58
59#include "mdss_mdp_trace.h"
60
/* Poll budget when waiting for AXI halt acknowledge (microseconds). */
#define AXI_HALT_TIMEOUT_US 0x4000
/* Runtime-PM autosuspend delay. */
#define AUTOSUSPEND_TIMEOUT_MS 200
#define DEFAULT_MDP_PIPE_WIDTH 2048
/* Pixel-count thresholds for 1080p and UHD panels. */
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)

/* Global MDSS driver state shared across the MDP driver files. */
struct mdss_data_type *mdss_res;
/* SCM control id used for the memory-protect call; set elsewhere (probe path not visible in this chunk). */
static u32 mem_protect_sd_ctrl_id;
69
/* Return the SMMU domain id used for unsecure framebuffer memory. */
static int mdss_fb_mem_get_iommu_domain(void)
{
	return mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE);
}
74
/* MDP5 entry points handed to the framebuffer core (mdss_fb). */
struct msm_mdp_interface mdp5 = {
	.init_fnc = mdss_mdp_overlay_init,
	.fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
	.fb_stride = mdss_mdp_fb_stride,
	.check_dsi_status = mdss_check_dsi_ctrl_status,
	.get_format_params = mdss_mdp_get_format_params,
};
82
/* Initial RT bandwidth vote used by mdss_mdp_bus_scale_register(). */
#define IB_QUOTA 2000000000
#define AB_QUOTA 2000000000

/* Upper bound on AXI ports handled by the bus-vote splitting logic. */
#define MAX_AXI_PORT_COUNT 3

/* SCM control ids; presumably select the secure-display memory-protect
 * call variant — confirm against the SCM interface for this target.
 */
#define MEM_PROTECT_SD_CTRL 0xF
#define MEM_PROTECT_SD_CTRL_FLAT 0x14

/* mdp_lock guards irq mask state/registers; mdss_mdp_intr_lock guards
 * the interrupt callback table.
 */
static DEFINE_SPINLOCK(mdp_lock);
static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
static DEFINE_MUTEX(mdp_clk_lock);
static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
static DEFINE_MUTEX(mdp_fs_idle_pc_lock);
96
/* Known panel interface names and their MDSS interface type ids. */
static struct mdss_panel_intf pan_types[] = {
	{"dsi", MDSS_PANEL_INTF_DSI},
	{"edp", MDSS_PANEL_INTF_EDP},
	{"hdmi", MDSS_PANEL_INTF_HDMI},
};
/* Panel selection string; presumably filled from boot parameters — the writer is outside this chunk. */
static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
103
/* MDP hardware block descriptor; its interrupts go to mdss_mdp_isr(). */
struct mdss_hw mdss_mdp_hw = {
	.hw_ndx = MDSS_HW_MDP,
	.ptr = NULL,
	.irq_handler = mdss_mdp_isr,
};
109
/* define for h/w block with external driver: no local handler; its
 * interrupts are forwarded through the MDSS irq domain (see
 * mdss_irq_handler()).
 */
struct mdss_hw mdss_misc_hw = {
	.hw_ndx = MDSS_HW_MISC,
	.ptr = NULL,
	.irq_handler = NULL,
};
116
Sachin Bhayare3d3767e2018-01-02 21:10:57 +0530117#ifdef CONFIG_QCOM_BUS_SCALING
/*
 * Register (config) bus vote table: CPU master to display-config slave.
 * Only the ib field varies per usecase; index 0 is the "off" vote.
 */
#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
	{						\
		.src = MSM_BUS_MASTER_AMPSS_M0,		\
		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
		.ab = (ab_val),				\
		.ib = (ib_val),				\
	}

/* ib values voted for the 19.2/40/80 MHz register bus levels. */
#define BUS_VOTE_19_MHZ 153600000
#define BUS_VOTE_40_MHZ 320000000
#define BUS_VOTE_80_MHZ 640000000

static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
};
static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
		mdp_reg_bus_vectors)];
static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
	.usecase = mdp_reg_bus_usecases,
	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
	.name = "mdss_reg",
	/* vote is dropped automatically when the CPU subsystem idles */
	.active_only = true,
};
144#endif
145
/* Writeback output formats rejected on MDP rev 1.0.7 (per the name; the consumer of this table is outside this chunk). */
u32 invalid_mdp107_wb_output_fmts[] = {
	MDP_XRGB_8888,
	MDP_RGBX_8888,
	MDP_BGRX_8888,
};
151
/*
 * struct intr_callback - per-interrupt handler registration
 * @func: intr handler, invoked from mdss_mdp_intr_done()
 * @arg: requested argument to the handler
 */
struct intr_callback {
	void (*func)(void *);
	void *arg;
};
161
/*
 * struct mdss_mdp_intr_reg - one MDP interrupt register set
 * @clr_off: offset to CLEAR reg
 * @en_off: offset to ENABLE reg
 * @status_off: offset to STATUS reg
 *
 * Offsets are relative to mdata->mdp_base.
 */
struct mdss_mdp_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};
173
/*
 * struct mdss_mdp_irq - maps each irq with i/f
 * @intr_type: type of interface (MDSS_MDP_IRQ_TYPE_*)
 * @intf_num: i/f the irq is associated with
 * @irq_mask: corresponding bit in the reg set
 * @reg_idx: which reg set to program (index into mdp_intr_reg)
 */
struct mdss_mdp_irq {
	u32 intr_type;
	u32 intf_num;
	u32 irq_mask;
	u32 reg_idx;
};
187
/* Register sets referenced by mdp_irq_map: index 0 = INTR, 1 = INTR2. */
static struct mdss_mdp_intr_reg mdp_intr_reg[] = {
	{ MDSS_MDP_REG_INTR_CLEAR, MDSS_MDP_REG_INTR_EN,
		MDSS_MDP_REG_INTR_STATUS },
	{ MDSS_MDP_REG_INTR2_CLEAR, MDSS_MDP_REG_INTR2_EN,
		MDSS_MDP_REG_INTR2_STATUS }
};
194
/*
 * Static map from (intr_type, intf_num) to the irq mask bit and the
 * register set holding it; looked up via mdss_mdp_intr2index().
 */
static struct mdss_mdp_irq mdp_irq_map[] =  {
	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 1,
		MDSS_MDP_INTR_INTF_0_UNDERRUN, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 2,
		MDSS_MDP_INTR_INTF_1_UNDERRUN, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 3,
		MDSS_MDP_INTR_INTF_2_UNDERRUN, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 4,
		MDSS_MDP_INTR_INTF_3_UNDERRUN, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 1,
		MDSS_MDP_INTR_INTF_0_VSYNC, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 2,
		MDSS_MDP_INTR_INTF_1_VSYNC, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 3,
		MDSS_MDP_INTR_INTF_2_VSYNC, 0},
	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 4,
		MDSS_MDP_INTR_INTF_3_VSYNC, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 0,
		MDSS_MDP_INTR_PING_PONG_0_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 1,
		MDSS_MDP_INTR_PING_PONG_1_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 2,
		MDSS_MDP_INTR_PING_PONG_2_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 3,
		MDSS_MDP_INTR_PING_PONG_3_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 0,
		MDSS_MDP_INTR_PING_PONG_0_RD_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 1,
		MDSS_MDP_INTR_PING_PONG_1_RD_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 2,
		MDSS_MDP_INTR_PING_PONG_2_RD_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 3,
		MDSS_MDP_INTR_PING_PONG_3_RD_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 0,
		MDSS_MDP_INTR_PING_PONG_0_WR_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 1,
		MDSS_MDP_INTR_PING_PONG_1_WR_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 2,
		MDSS_MDP_INTR_PING_PONG_2_WR_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 3,
		MDSS_MDP_INTR_PING_PONG_3_WR_PTR, 0},
	{ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 0,
		MDSS_MDP_INTR_WB_0_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 1,
		MDSS_MDP_INTR_WB_1_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_WB_WFD_COMP, 0,
		MDSS_MDP_INTR_WB_2_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 0,
		MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 1,
		MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2,
		MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3,
		MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2,
		MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1},
	/* NOTE(review): intf 3 reuses the PING_PONG_2 CWB overflow mask;
	 * looks like a copy-paste — confirm whether a
	 * MDSS_MDP_INTR2_PING_PONG_3_CWB_OVERFLOW bit exists for this HW.
	 */
	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3,
		MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}
};
255
/* Callback table indexed like mdp_irq_map; populated via
 * mdss_mdp_set_intr_callback*() and allocated outside this view.
 */
static struct intr_callback *mdp_intr_cb;
257
258static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
259static int mdss_mdp_parse_dt(struct platform_device *pdev);
260static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
261static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
262static int mdss_mdp_parse_dt_wb(struct platform_device *pdev);
263static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
264static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
265static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
266 char *prop_name, u32 *offsets, int len);
267static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
268 char *prop_name);
269static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
270static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev);
271static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
272static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev);
273static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev);
274static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev);
275static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev);
276static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev);
277
278static inline u32 is_mdp_irq_enabled(void)
279{
280 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
281 int i;
282
283 for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++)
284 if (mdata->mdp_irq_mask[i] != 0)
285 return 1;
286
287 if (mdata->mdp_hist_irq_mask)
288 return 1;
289
290 if (mdata->mdp_intf_irq_mask)
291 return 1;
292
293 return 0;
294}
295
296u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
297{
298 /* The adreno GPU hardware requires that the pitch be aligned to
299 * 32 pixels for color buffers, so for the cases where the GPU
300 * is writing directly to fb0, the framebuffer pitch
301 * also needs to be 32 pixel aligned
302 */
303
304 if (fb_index == 0)
305 return ALIGN(xres, 32) * bpp;
306 else
307 return xres * bpp;
308}
309
310static void mdss_irq_mask(struct irq_data *data)
311{
312 struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
313 unsigned long irq_flags;
314
315 if (!mdata)
316 return;
317
318 pr_debug("irq_domain_mask %lu\n", data->hwirq);
319
320 if (data->hwirq < 32) {
321 spin_lock_irqsave(&mdp_lock, irq_flags);
322 mdata->mdss_util->disable_irq(&mdss_misc_hw);
323 spin_unlock_irqrestore(&mdp_lock, irq_flags);
324 }
325}
326
327static void mdss_irq_unmask(struct irq_data *data)
328{
329 struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
330 unsigned long irq_flags;
331
332 if (!mdata)
333 return;
334
335 pr_debug("irq_domain_unmask %lu\n", data->hwirq);
336
337 if (data->hwirq < 32) {
338 spin_lock_irqsave(&mdp_lock, irq_flags);
339 mdata->mdss_util->enable_irq(&mdss_misc_hw);
340 spin_unlock_irqrestore(&mdp_lock, irq_flags);
341 }
342}
343
/* irq_chip backing the MDSS misc irq domain (external h/w blocks). */
static struct irq_chip mdss_irq_chip = {
	.name		= "mdss",
	.irq_mask	= mdss_irq_mask,
	.irq_unmask	= mdss_irq_unmask,
};
349
/*
 * mdss_irq_domain_map() - bind a virtual irq to the MDSS irq chip
 * @d: the MDSS irq domain (host_data is the mdss_data_type)
 * @virq: linux virtual irq number being mapped
 * @hw: hardware irq number within the domain
 *
 * Installs mdss_irq_chip with level-type flow handling and stashes the
 * driver data for the mask/unmask hooks. Always returns 0.
 */
static int mdss_irq_domain_map(struct irq_domain *d,
		unsigned int virq, irq_hw_number_t hw)
{
	struct mdss_data_type *mdata = d->host_data;
	/* check here if virq is a valid interrupt line */
	irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, mdata);
	return 0;
}
359
/* Domain ops: one-cell DT specifier, mapped through mdss_irq_domain_map. */
const struct irq_domain_ops mdss_irq_domain_ops = {
	.map = mdss_irq_domain_map,
	.xlate = irq_domain_xlate_onecell,
};
364
/*
 * mdss_irq_handler() - top-level MDSS interrupt dispatcher
 * @irq: linux irq number
 * @ptr: mdss_data_type passed at request time
 *
 * Reads the aggregated HW interrupt status and dispatches each pending
 * source (MDP, DSI0/1, EDP, HDMI) to its registered sub-handler via
 * mdss_util->irq_dispatch(). Any bits left afterwards are forwarded to
 * external drivers through the MDSS irq domain. irq_buzy brackets the
 * dispatch so other code can tell the handler is running.
 */
static irqreturn_t mdss_irq_handler(int irq, void *ptr)
{
	struct mdss_data_type *mdata = ptr;
	u32 intr;

	if (!mdata)
		return IRQ_NONE;
	else if (!mdss_get_irq_enable_state(&mdss_mdp_hw))
		return IRQ_HANDLED;

	intr = MDSS_REG_READ(mdata, MDSS_REG_HW_INTR_STATUS);

	mdss_mdp_hw.irq_info->irq_buzy = true;

	if (intr & MDSS_INTR_MDP) {
		/* MDP dispatch shares irq mask state, hence mdp_lock */
		spin_lock(&mdp_lock);
		mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
		spin_unlock(&mdp_lock);
		intr &= ~MDSS_INTR_MDP;
	}

	if (intr & MDSS_INTR_DSI0) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr);
		intr &= ~MDSS_INTR_DSI0;
	}

	if (intr & MDSS_INTR_DSI1) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr);
		intr &= ~MDSS_INTR_DSI1;
	}

	if (intr & MDSS_INTR_EDP) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr);
		intr &= ~MDSS_INTR_EDP;
	}

	if (intr & MDSS_INTR_HDMI) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr);
		intr &= ~MDSS_INTR_HDMI;
	}

	/* route misc. interrupts to external drivers */
	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdata->irq_domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	mdss_mdp_hw.irq_info->irq_buzy = false;

	return IRQ_HANDLED;
}
419
Sachin Bhayare3d3767e2018-01-02 21:10:57 +0530420#ifdef CONFIG_QCOM_BUS_SCALING
/*
 * mdss_mdp_bus_scale_register() - register all MDSS bus-scale clients
 * @mdata: MDSS driver data
 *
 * Registers the data bus client (table parsed from DT), the register bus
 * client (falling back to the static mdp_reg_bus_scale_table) and, when a
 * table exists, the HW-RT bus client. Register/HW-RT registration failures
 * are non-fatal. Finishes by staging an initial RT vote (see comment at
 * the tail). Idempotent: each handle is only registered when still zero.
 *
 * Returns 0 on success or a negative errno from DT parsing / data bus
 * registration.
 */
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	struct msm_bus_scale_pdata *reg_bus_pdata;
	int i, rc;

	if (!mdata->bus_hdl) {
		rc = mdss_mdp_parse_dt_bus_scale(mdata->pdev);
		if (rc) {
			pr_err("Error in device tree : bus scale\n");
			return rc;
		}

		mdata->bus_hdl =
			msm_bus_scale_register_client(mdata->bus_scale_table);
		if (!mdata->bus_hdl) {
			pr_err("bus_client register failed\n");
			return -EINVAL;
		}

		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
	}

	if (!mdata->reg_bus_scale_table) {
		/* fall back to the static table; wire up its paths once */
		reg_bus_pdata = &mdp_reg_bus_scale_table;
		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
			mdp_reg_bus_usecases[i].num_paths = 1;
			mdp_reg_bus_usecases[i].vectors =
				&mdp_reg_bus_vectors[i];
		}
		mdata->reg_bus_scale_table = reg_bus_pdata;
	}

	if (!mdata->reg_bus_hdl) {
		mdata->reg_bus_hdl =
			msm_bus_scale_register_client(
			      mdata->reg_bus_scale_table);
		if (!mdata->reg_bus_hdl)
			/* Continue without reg_bus scaling */
			pr_warn("reg_bus_client register failed\n");
		else
			pr_debug("register reg_bus_hdl=%x\n",
					mdata->reg_bus_hdl);
	}

	if (mdata->hw_rt_bus_scale_table && !mdata->hw_rt_bus_hdl) {
		mdata->hw_rt_bus_hdl =
			msm_bus_scale_register_client(
				mdata->hw_rt_bus_scale_table);
		if (!mdata->hw_rt_bus_hdl)
			/* Continue without reg_bus scaling */
			pr_warn("hw_rt_bus client register failed\n");
		else
			pr_debug("register hw_rt_bus=%x\n",
					mdata->hw_rt_bus_hdl);
	}

	/*
	 * Following call will not result in actual vote rather update the
	 * current index and ab/ib value. When continuous splash is enabled,
	 * actual vote will happen when splash handoff is done.
	 */
	return mdss_bus_scale_set_quota(MDSS_MDP_RT, AB_QUOTA, IB_QUOTA);
}
484
485static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
486{
487 pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
488
489 if (mdata->bus_hdl)
490 msm_bus_scale_unregister_client(mdata->bus_hdl);
491
492 pr_debug("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
493
494 if (mdata->reg_bus_hdl) {
495 msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
496 mdata->reg_bus_hdl = 0;
497 }
498
499 if (mdata->hw_rt_bus_hdl) {
500 msm_bus_scale_unregister_client(mdata->hw_rt_bus_hdl);
501 mdata->hw_rt_bus_hdl = 0;
502 }
503}
504
/*
 * mdss_mdp_bus_scale_set_quota() - program RT/NRT bandwidth votes
 * @ab_quota_rt: average bandwidth for realtime clients
 * @ab_quota_nrt: average bandwidth for non-realtime clients
 * @ib_quota_rt: instantaneous bandwidth for realtime clients
 * @ib_quota_nrt: instantaneous bandwidth for non-realtime clients
 *
 * Splits the quotas across the AXI ports (separately for RT/NRT ports
 * when a fixed QoS arbiter or dedicated NRT ports exist), skips the vote
 * when nothing changed, and rotates through the spare usecase slots of
 * the bus table to publish the new vectors.
 *
 * Caller needs to hold mdata->bus_lock lock before calling this function.
 */
static int mdss_mdp_bus_scale_set_quota(u64 ab_quota_rt, u64 ab_quota_nrt,
		u64 ib_quota_rt, u64 ib_quota_nrt)
{
	int new_uc_idx;
	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	int rc;

	if (mdss_res->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
		return -EINVAL;
	}

	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
		/* all zero: fall back to usecase 0, the "off" vote */
		new_uc_idx = 0;
	} else {
		int i;
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			mdss_res->bus_scale_table;
		u32 nrt_axi_port_cnt = mdss_res->nrt_axi_port_cnt;
		u32 total_axi_port_cnt = mdss_res->axi_port_cnt;
		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
		int match_cnt = 0;

		if (!bw_table || !total_axi_port_cnt ||
		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
			pr_err("invalid input\n");
			return -EINVAL;
		}

		if (mdss_res->bus_channels) {
			/* ib is per-channel; spread it across channels */
			ib_quota_rt = div_u64(ib_quota_rt,
						mdss_res->bus_channels);
			ib_quota_nrt = div_u64(ib_quota_nrt,
						mdss_res->bus_channels);
		}

		if (mdss_res->has_fixed_qos_arbiter_enabled ||
			nrt_axi_port_cnt) {
			/* RT and NRT ports are voted independently */

			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);

			for (i = 0; i < total_axi_port_cnt; i++) {
				if (i < rt_axi_port_cnt) {
					ab_quota[i] = ab_quota_rt;
					ib_quota[i] = ib_quota_rt;
				} else {
					ab_quota[i] = ab_quota_nrt;
					ib_quota[i] = ib_quota_nrt;
				}
			}
		} else {
			/* no NRT ports: spread combined totals evenly */
			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
					total_axi_port_cnt);
			ib_quota[0] = ib_quota_rt + ib_quota_nrt;

			for (i = 1; i < total_axi_port_cnt; i++) {
				ab_quota[i] = ab_quota[0];
				ib_quota[i] = ib_quota[0];
			}
		}

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase
				[mdss_res->curr_bw_uc_idx].vectors[i];
			/* avoid performing updates for small changes */
			if ((ab_quota[i] == vect->ab) &&
				(ib_quota[i] == vect->ib))
				match_cnt++;
		}

		if (match_cnt == total_axi_port_cnt) {
			pr_debug("skip BW vote\n");
			return 0;
		}

		/* rotate through the non-zero usecase slots */
		new_uc_idx = (mdss_res->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = ab_quota[i];
			vect->ib = ib_quota[i];

			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
				, i, vect->ab, vect->ib);
		}
	}
	mdss_res->curr_bw_uc_idx = new_uc_idx;
	mdss_res->ao_bw_uc_idx = new_uc_idx;

	if ((mdss_res->bus_ref_cnt == 0) && mdss_res->curr_bw_uc_idx) {
		rc = 0;
	} else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
		ATRACE_BEGIN("msm_bus_scale_req");
		rc = msm_bus_scale_client_update_request(mdss_res->bus_hdl,
			new_uc_idx);
		ATRACE_END("msm_bus_scale_req");
	}
	return rc;
}
612
613struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
614{
615 struct reg_bus_client *client;
616 static u32 id;
617
618 if (client_name == NULL) {
619 pr_err("client name is null\n");
620 return ERR_PTR(-EINVAL);
621 }
622
623 client = kcalloc(1, sizeof(struct reg_bus_client), GFP_KERNEL);
624 if (!client)
625 return ERR_PTR(-ENOMEM);
626
627 mutex_lock(&mdss_res->reg_bus_lock);
628 strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
629 client->usecase_ndx = VOTE_INDEX_DISABLE;
630 client->id = id;
631 pr_debug("bus vote client %s created:%pK id :%d\n", client_name,
632 client, id);
633 id++;
634 list_add(&client->list, &mdss_res->reg_bus_clist);
635 mutex_unlock(&mdss_res->reg_bus_lock);
636
637 return client;
638}
639
640void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
641{
642 if (!client) {
643 pr_err("reg bus vote: invalid client handle\n");
644 } else {
645 pr_debug("bus vote client %s destroyed:%pK id:%u\n",
646 client->name, client, client->id);
647 mutex_lock(&mdss_res->reg_bus_lock);
648 list_del_init(&client->list);
649 mutex_unlock(&mdss_res->reg_bus_lock);
650 kfree(client);
651 }
652}
653
/*
 * mdss_update_reg_bus_vote() - update one client's reg-bus vote level
 * @bus_client: the voting client (no-op when NULL or no reg bus handle)
 * @usecase_ndx: requested VOTE_INDEX_* level for this client
 *
 * Records the client's request, recomputes the maximum level across all
 * registered clients and, only when the aggregate changed, pushes the new
 * usecase to the bus driver. All under reg_bus_lock.
 *
 * Returns the msm_bus_scale_client_update_request() result, or 0 when no
 * vote was needed.
 */
int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
	int ret = 0;
	bool changed = false;
	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
	struct reg_bus_client *client, *temp_client;

	if (!mdss_res || !mdss_res->reg_bus_hdl || !bus_client)
		return 0;

	mutex_lock(&mdss_res->reg_bus_lock);
	bus_client->usecase_ndx = usecase_ndx;
	/* NOTE(review): _safe iteration, but nothing is deleted here —
	 * plain list_for_each_entry would do.
	 */
	list_for_each_entry_safe(client, temp_client, &mdss_res->reg_bus_clist,
		list) {

		if (client->usecase_ndx < VOTE_INDEX_MAX &&
		    client->usecase_ndx > max_usecase_ndx)
			max_usecase_ndx = client->usecase_ndx;
	}

	if (mdss_res->reg_bus_usecase_ndx != max_usecase_ndx) {
		changed = true;
		mdss_res->reg_bus_usecase_ndx = max_usecase_ndx;
	}

	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
		__builtin_return_address(0), changed, max_usecase_ndx,
		bus_client->name, bus_client->id, usecase_ndx);
	MDSS_XLOG(changed, max_usecase_ndx, bus_client->id, usecase_ndx);
	if (changed)
		ret = msm_bus_scale_client_update_request(mdss_res->reg_bus_hdl,
			max_usecase_ndx);

	mutex_unlock(&mdss_res->reg_bus_lock);
	return ret;
}
690
691int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
692{
693 int rc = 0;
694 int i;
695 u64 total_ab_rt = 0, total_ib_rt = 0;
696 u64 total_ab_nrt = 0, total_ib_nrt = 0;
697
698 mutex_lock(&mdss_res->bus_lock);
699
700 mdss_res->ab[client] = ab_quota;
701 mdss_res->ib[client] = ib_quota;
702 trace_mdp_perf_update_bus(client, ab_quota, ib_quota);
703
704 for (i = 0; i < MDSS_MAX_BUS_CLIENTS; i++) {
705 if (i == MDSS_MDP_NRT) {
706 total_ab_nrt = mdss_res->ab[i];
707 total_ib_nrt = mdss_res->ib[i];
708 } else {
709 total_ab_rt += mdss_res->ab[i];
710 total_ib_rt = max(total_ib_rt, mdss_res->ib[i]);
711 }
712 }
713
714 rc = mdss_mdp_bus_scale_set_quota(total_ab_rt, total_ab_nrt,
715 total_ib_rt, total_ib_nrt);
716
717 mutex_unlock(&mdss_res->bus_lock);
718
719 return rc;
720}
721#else
/* Stubs used when CONFIG_QCOM_BUS_SCALING is disabled: registration and
 * voting become no-ops with the same external interfaces.
 */
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	return 0;
}

static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
{
}

/* No-op vote: just log the request. */
int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
{
	pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n",
			client, ab_quota, ib_quota);

	return 0;
}

/* Callers treat a NULL client as "no reg-bus voting available". */
struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
{
	return NULL;
}

void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
{
}

int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
	pr_debug("%pS: No reg scaling! usecase=%u\n",
			__builtin_return_address(0), usecase_ndx);

	return 0;
}
755#endif
756
757
758static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num)
759{
760 int i;
761
762 for (i = 0; i < ARRAY_SIZE(mdp_irq_map); i++) {
763 if (intr_type == mdp_irq_map[i].intr_type &&
764 intf_num == mdp_irq_map[i].intf_num)
765 return i;
766 }
767 return -EINVAL;
768}
769
770u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num)
771{
772 int idx = mdss_mdp_intr2index(intr_type, intf_num);
773
774 return (idx < 0) ? 0 : mdp_irq_map[idx].irq_mask;
775}
776
/* Enable the shared MDP hardware interrupt line. */
void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata)
{
	mdata->mdss_util->enable_irq(&mdss_mdp_hw);
}
781
/* Disable the shared MDP line, but only once no irq source is armed. */
void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata)
{
	if (!is_mdp_irq_enabled())
		mdata->mdss_util->disable_irq(&mdss_mdp_hw);
}
787
788/* function assumes that mdp is clocked to access hw registers */
789void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
790 u32 intr_type, u32 intf_num)
791{
792 unsigned long irq_flags;
793 int irq_idx;
794 struct mdss_mdp_intr_reg reg;
795 struct mdss_mdp_irq irq;
796
797 irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
798 if (irq_idx < 0) {
799 pr_err("invalid irq request\n");
800 return;
801 }
802
803 irq = mdp_irq_map[irq_idx];
804 reg = mdp_intr_reg[irq.reg_idx];
805
806 pr_debug("clearing mdp irq mask=%x\n", irq.irq_mask);
807 spin_lock_irqsave(&mdp_lock, irq_flags);
808 writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
809 spin_unlock_irqrestore(&mdp_lock, irq_flags);
810}
811
/*
 * mdss_mdp_irq_enable() - arm one mapped MDP interrupt
 * @intr_type: interrupt type (MDSS_MDP_IRQ_TYPE_*)
 * @intf_num: interface / ping-pong index
 *
 * Clears any stale status, sets the enable bit under mdp_lock and turns
 * on the shared MDP line.
 *
 * Returns 0, -EINVAL for an unknown pair, or -EBUSY when already armed.
 */
int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	unsigned long irq_flags;
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return -EINVAL;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
		ret = -EBUSY;
	} else {
		pr_debug("MDP IRQ mask old=%x new=%x\n",
				mdata->mdp_irq_mask[irq.reg_idx], irq.irq_mask);
		mdata->mdp_irq_mask[irq.reg_idx] |= irq.irq_mask;
		/* clear stale status before unmasking in the enable reg */
		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);

	return ret;
}
/*
 * mdss_mdp_hist_irq_enable() - arm histogram interrupt bit(s)
 * @irq: histogram irq mask bits to enable
 *
 * Clears stale status, ORs the bits into the histogram enable register
 * and turns on the shared MDP line.
 *
 * NOTE(review): unlike mdss_mdp_irq_enable() no mdp_lock is taken here —
 * presumably the caller serializes histogram irq updates; confirm.
 *
 * Returns 0, or -EBUSY when the bits are already set.
 */
int mdss_mdp_hist_irq_enable(u32 irq)
{
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (mdata->mdp_hist_irq_mask & irq) {
		pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
				irq, mdata->mdp_hist_irq_mask);
		ret = -EBUSY;
	} else {
		pr_debug("mask old=%x new=%x\n",
				mdata->mdp_hist_irq_mask, irq);
		mdata->mdp_hist_irq_mask |= irq;
		writel_relaxed(irq, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_CLEAR);
		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
	}

	return ret;
}
870
/*
 * mdss_mdp_irq_disable() - disarm one mapped MDP interrupt
 * @intr_type: interrupt type (MDSS_MDP_IRQ_TYPE_*)
 * @intf_num: interface / ping-pong index
 *
 * Clears the enable bit under mdp_lock and drops the shared MDP line
 * once no interrupt source remains armed. Warns if the irq was not set.
 */
void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	unsigned long irq_flags;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
	} else {
		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		if (!is_mdp_irq_enabled())
			mdata->mdss_util->disable_irq(&mdss_mdp_hw);
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
901
902/* This function is used to check and clear the status of MDP interrupts */
903void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num)
904{
905 u32 status;
906 int irq_idx;
907 unsigned long irq_flags;
908 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
909 struct mdss_mdp_intr_reg reg;
910 struct mdss_mdp_irq irq;
911
912 irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
913 if (irq_idx < 0) {
914 pr_err("invalid irq request\n");
915 return;
916 }
917
918 irq = mdp_irq_map[irq_idx];
919 reg = mdp_intr_reg[irq.reg_idx];
920
921 spin_lock_irqsave(&mdp_lock, irq_flags);
922 status = irq.irq_mask & readl_relaxed(mdata->mdp_base +
923 reg.status_off);
924 if (status) {
925 pr_debug("clearing irq: intr_type:%d, intf_num:%d\n",
926 intr_type, intf_num);
927 writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
928 }
929 spin_unlock_irqrestore(&mdp_lock, irq_flags);
930}
931
932void mdss_mdp_hist_irq_disable(u32 irq)
933{
934 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
935
936 if (!(mdata->mdp_hist_irq_mask & irq)) {
937 pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
938 irq, mdata->mdp_hist_irq_mask);
939 } else {
940 mdata->mdp_hist_irq_mask &= ~irq;
941 writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
942 MDSS_MDP_REG_HIST_INTR_EN);
943 if (!is_mdp_irq_enabled())
944 mdata->mdss_util->disable_irq(&mdss_mdp_hw);
945 }
946}
947
/**
 * mdss_mdp_irq_disable_nosync() - disable mdp irq without taking mdp_lock
 * @intr_type: mdp interface type
 * @intf_num: mdp interface num
 *
 * This function is called from interrupt context.
 * mdp_lock is already held upstream (mdss_irq_handler),
 * therefore spin_lock(&mdp_lock) is not allowed here.
 */
void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
	} else {
		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		if (!is_mdp_irq_enabled())
			mdata->mdss_util->disable_irq_nosync(&mdss_mdp_hw);
	}
}
985
986int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
987 void (*fnc_ptr)(void *), void *arg)
988{
989 unsigned long flags;
990 int index;
991
992 index = mdss_mdp_intr2index(intr_type, intf_num);
993 if (index < 0) {
994 pr_warn("invalid intr type=%u intf_numf_num=%u\n",
995 intr_type, intf_num);
996 return -EINVAL;
997 }
998
999 spin_lock_irqsave(&mdss_mdp_intr_lock, flags);
1000 WARN(mdp_intr_cb[index].func && fnc_ptr,
1001 "replacing current intr callback for ndx=%d\n", index);
1002 mdp_intr_cb[index].func = fnc_ptr;
1003 mdp_intr_cb[index].arg = arg;
1004 spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags);
1005
1006 return 0;
1007}
1008
1009int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
1010 void (*fnc_ptr)(void *), void *arg)
1011{
1012 int index;
1013
1014 index = mdss_mdp_intr2index(intr_type, intf_num);
1015 if (index < 0) {
1016 pr_warn("invalid intr Typee=%u intf_num=%u\n",
1017 intr_type, intf_num);
1018 return -EINVAL;
1019 }
1020
1021 WARN(mdp_intr_cb[index].func && fnc_ptr,
1022 "replacing current intr callbackack for ndx=%d\n",
1023 index);
1024 mdp_intr_cb[index].func = fnc_ptr;
1025 mdp_intr_cb[index].arg = arg;
1026
1027 return 0;
1028}
1029
1030static inline void mdss_mdp_intr_done(int index)
1031{
1032 void (*fnc)(void *);
1033 void *arg;
1034
1035 spin_lock(&mdss_mdp_intr_lock);
1036 fnc = mdp_intr_cb[index].func;
1037 arg = mdp_intr_cb[index].arg;
1038 spin_unlock(&mdss_mdp_intr_lock);
1039 if (fnc)
1040 fnc(arg);
1041}
1042
/*
 * mdss_mdp_isr() - MDP block interrupt service routine
 * @irq: linux irq number
 * @ptr: mdss_data_type registered with the dispatcher
 *
 * For each interrupt register set: read status, ack everything read,
 * mask with the enable register, then fan out to the callbacks in
 * mdp_irq_map order. Register set 0 additionally feeds MISR CRC
 * collection. Histogram interrupts are handled the same way from their
 * dedicated registers, and video interface ISRs run last.
 */
irqreturn_t mdss_mdp_isr(int irq, void *ptr)
{
	struct mdss_data_type *mdata = ptr;
	u32 isr, mask, hist_isr, hist_mask;
	int i, j;

	/* registers are unreadable without the MDP clock */
	if (!mdata->clk_ena)
		return IRQ_HANDLED;

	for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) {
		struct mdss_mdp_intr_reg reg = mdp_intr_reg[i];

		isr = readl_relaxed(mdata->mdp_base + reg.status_off);
		if (isr == 0)
			continue;

		mask = readl_relaxed(mdata->mdp_base + reg.en_off);
		/* ack all pending bits before dispatching */
		writel_relaxed(isr, mdata->mdp_base + reg.clr_off);

		pr_debug("%s: reg:%d isr=%x mask=%x\n",
				__func__, i+1, isr, mask);

		isr &= mask;
		if (isr == 0)
			continue;

		for (j = 0; j < ARRAY_SIZE(mdp_irq_map); j++)
			if (mdp_irq_map[j].reg_idx == i &&
					(isr & mdp_irq_map[j].irq_mask))
				mdss_mdp_intr_done(j);
		if (!i) {
			/* MISR CRC collection: only register set 0 bits */
			if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
					false);

			if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
					false);

			if (isr & MDSS_MDP_INTR_INTF_0_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP,
					true);

			if (isr & MDSS_MDP_INTR_INTF_1_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
					true);

			if (isr & MDSS_MDP_INTR_INTF_2_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
					true);

			if (isr & MDSS_MDP_INTR_INTF_3_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI,
					true);

			if (isr & MDSS_MDP_INTR_WB_0_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);

			if (isr & MDSS_MDP_INTR_WB_1_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);

			if (isr & MDSS_MDP_INTR_WB_2_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);
		}
	}

	hist_isr = readl_relaxed(mdata->mdp_base +
			MDSS_MDP_REG_HIST_INTR_STATUS);
	if (hist_isr != 0) {
		hist_mask = readl_relaxed(mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		writel_relaxed(hist_isr, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_CLEAR);
		hist_isr &= hist_mask;
		if (hist_isr != 0)
			mdss_mdp_hist_intr_done(hist_isr);
	}

	mdss_mdp_video_isr(mdata->video_intf, mdata->nintf);
	return IRQ_HANDLED;
}
1127
1128static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
1129{
1130 int ret = -ENODEV;
1131 struct clk *clk = mdss_mdp_get_clk(clk_idx);
1132
1133 if (clk) {
1134 pr_debug("clk=%d en=%d\n", clk_idx, enable);
1135 if (enable) {
1136 if (clk_idx == MDSS_CLK_MDP_VSYNC)
1137 clk_set_rate(clk, 19200000);
1138 ret = clk_prepare_enable(clk);
1139 } else {
1140 clk_disable_unprepare(clk);
1141 ret = 0;
1142 }
1143 }
1144 return ret;
1145}
1146
1147int mdss_mdp_vsync_clk_enable(int enable, bool locked)
1148{
1149 int ret = 0;
1150
1151 pr_debug("clk enable=%d\n", enable);
1152
1153 if (!locked)
1154 mutex_lock(&mdp_clk_lock);
1155
1156 if (mdss_res->vsync_ena != enable) {
1157 mdss_res->vsync_ena = enable;
1158 ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
1159 }
1160
1161 if (!locked)
1162 mutex_unlock(&mdp_clk_lock);
1163 return ret;
1164}
1165
/*
 * mdss_mdp_set_clk_rate() - program the MDP core clock rate
 * @rate: requested rate in Hz
 *
 * The request is raised to at least perf_tune.min_mdp_clk, clamped to
 * the target's max_mdp_clk_rate, rounded via clk_round_rate() and only
 * applied when it differs from the currently running rate.
 */
void mdss_mdp_set_clk_rate(unsigned long rate)
{
	struct mdss_data_type *mdata = mdss_res;
	unsigned long clk_rate;
	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
	unsigned long min_clk_rate;

	/* never go below the perf-tuning floor */
	min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);

	if (clk) {
		mutex_lock(&mdp_clk_lock);
		if (min_clk_rate < mdata->max_mdp_clk_rate)
			clk_rate = clk_round_rate(clk, min_clk_rate);
		else
			clk_rate = mdata->max_mdp_clk_rate;
		if (IS_ERR_VALUE(clk_rate)) {
			pr_err("unable to round rate err=%ld\n", clk_rate);
		} else if (clk_rate != clk_get_rate(clk)) {
			if (IS_ERR_VALUE((unsigned long)
					clk_set_rate(clk, clk_rate)))
				pr_err("clk_set_rate failed\n");
			else
				pr_debug("mdp clk rate=%lu\n", clk_rate);
		}
		mutex_unlock(&mdp_clk_lock);
	} else {
		pr_err("mdp src clk not setup properly\n");
	}
}
1195
1196unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked)
1197{
1198 unsigned long clk_rate = 0;
1199 struct clk *clk = mdss_mdp_get_clk(clk_idx);
1200
1201 if (clk) {
1202 if (!locked)
1203 mutex_lock(&mdp_clk_lock);
1204
1205 clk_rate = clk_get_rate(clk);
1206
1207 if (!locked)
1208 mutex_unlock(&mdp_clk_lock);
1209 }
1210
1211 return clk_rate;
1212}
1213
1214/**
1215 * mdss_bus_rt_bw_vote() -- place bus bandwidth request
1216 * @enable: value of enable or disable
1217 *
1218 * hw_rt table has two entries, 0 and Min Vote (1Mhz)
1219 * while attaching SMMU and for few TZ operations which
1220 * happen at very early stage, we will request Min Vote
1221 * thru this handle.
1222 *
1223 */
1224static int mdss_bus_rt_bw_vote(bool enable)
1225{
1226 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1227 int rc = 0;
1228 bool changed = false;
1229
1230 if (!mdata->hw_rt_bus_hdl || mdata->handoff_pending)
1231 return 0;
1232
1233 if (enable) {
1234 if (mdata->hw_rt_bus_ref_cnt == 0)
1235 changed = true;
1236 mdata->hw_rt_bus_ref_cnt++;
1237 } else {
1238 if (mdata->hw_rt_bus_ref_cnt != 0) {
1239 mdata->hw_rt_bus_ref_cnt--;
1240 if (mdata->hw_rt_bus_ref_cnt == 0)
1241 changed = true;
1242 } else {
1243 pr_warn("%s: bus bw votes are not balanced\n",
1244 __func__);
1245 }
1246 }
1247
1248 pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
1249 __builtin_return_address(0), current->group_leader->comm,
1250 mdata->hw_rt_bus_ref_cnt, changed, enable);
1251
1252 if (changed) {
1253 rc = msm_bus_scale_client_update_request(mdata->hw_rt_bus_hdl,
1254 enable ? 1 : 0);
1255 if (rc)
1256 pr_err("%s: Bus bandwidth vote failed\n", __func__);
1257 }
1258
1259 return rc;
1260}
1261
/**
 * __mdss_mdp_reg_access_clk_enable - Enable minimum MDSS clocks required
 * for register access
 */
static inline void __mdss_mdp_reg_access_clk_enable(
		struct mdss_data_type *mdata, bool enable)
{
	if (enable) {
		/* vote register bus and RT bandwidth before clocks go on */
		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
				VOTE_INDEX_LOW);
		mdss_bus_rt_bw_vote(true);
		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 1);
		mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
		mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
	} else {
		/* tear down in the reverse order of the enable path */
		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
		mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
		mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
		mdss_bus_rt_bw_vote(false);
		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 0);
		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
				VOTE_INDEX_DISABLE);
	}
}
1287
/*
 * __mdss_mdp_vbif_halt() - halt AXI traffic on one VBIF instance
 * @mdata: MDP private data
 * @is_nrt: true for the non-realtime VBIF, false for the realtime one
 *
 * Forces the VBIF clock on, requests an AXI halt and polls for the
 * halt-acknowledge bit. Returns 0 on success (also when the VBIF base
 * is absent on this target), -ETIMEDOUT when the ack never arrives.
 */
int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
{
	int rc = 0;
	void __iomem *base;
	u32 halt_ack_mask = BIT(0), status;

	/* if not real time vbif */
	if (is_nrt)
		base = mdata->vbif_nrt_io.base;
	else
		base = mdata->vbif_io.base;

	if (!base) {
		/* some targets might not have a nrt port */
		goto vbif_done;
	}

	/* force vbif clock on */
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 1, is_nrt);

	/* request halt */
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 1, is_nrt);

	/* poll every 1ms for the halt ack, bounded by AXI_HALT_TIMEOUT_US */
	rc = readl_poll_timeout(base +
			MMSS_VBIF_AXI_HALT_CTRL1, status, (status &
			halt_ack_mask),
			1000, AXI_HALT_TIMEOUT_US);
	if (rc == -ETIMEDOUT) {
		pr_err("VBIF axi is not halting. TIMEDOUT.\n");
		goto vbif_done;
	}

	pr_debug("VBIF axi is halted\n");

vbif_done:
	return rc;
}
1325
/**
 * mdss_mdp_vbif_axi_halt() - Halt MDSS AXI ports
 * @mdata: pointer to the global mdss data structure.
 *
 * This function can be called during deep suspend, display off or for
 * debugging purposes. On success it should be assumed that AXI ports connected
 * to RT VBIF are in idle state and would not fetch any more data.
 */
static void mdss_mdp_vbif_axi_halt(struct mdss_data_type *mdata)
{
	/* hold the minimum clocks/votes needed for register access */
	__mdss_mdp_reg_access_clk_enable(mdata, true);

	/* real time ports */
	__mdss_mdp_vbif_halt(mdata, false);
	/* non-real time ports */
	__mdss_mdp_vbif_halt(mdata, true);

	__mdss_mdp_reg_access_clk_enable(mdata, false);
}
1345
/*
 * mdss_iommu_ctrl() - reference-counted SMMU attach/detach
 * @enable: 1 to take a reference (attach on the 0->1 transition),
 *          0 to drop one (detach on the 1->0 transition)
 *
 * Attach is deferred while continuous-splash handoff is pending, since
 * the hardware may still be fetching via physical addresses.
 *
 * Return: negative error code on failure, otherwise the updated
 * reference count.
 */
int mdss_iommu_ctrl(int enable)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int rc = 0;

	mutex_lock(&mdp_iommu_ref_cnt_lock);
	pr_debug("%pS: enable:%d ref_cnt:%d attach:%d hoff:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached, mdata->handoff_pending);

	if (enable) {
		/*
		 * delay iommu attach until continuous splash screen has
		 * finished handoff, as it may still be working with phys addr
		 */
		if (!mdata->iommu_attached && !mdata->handoff_pending) {
			/* RT bus bw must be voted before touching the SMMU */
			mdss_bus_rt_bw_vote(true);
			rc = mdss_smmu_attach(mdata);
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0) {
				rc = mdss_smmu_detach(mdata);
				mdss_bus_rt_bw_vote(false);
			}
		} else {
			pr_err("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&mdp_iommu_ref_cnt_lock);

	if (IS_ERR_VALUE((unsigned long)rc))
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
1384
1385static void mdss_mdp_memory_retention_enter(void)
1386{
1387 struct clk *mdss_mdp_clk = NULL;
1388 struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1389
1390 if (mdp_vote_clk) {
1391 mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
1392 if (mdss_mdp_clk) {
1393 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
1394 clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
1395 clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
1396 }
1397 }
1398}
1399
1400static void mdss_mdp_memory_retention_exit(void)
1401{
1402 struct clk *mdss_mdp_clk = NULL;
1403 struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1404
1405 if (mdp_vote_clk) {
1406 mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
1407 if (mdss_mdp_clk) {
1408 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
1409 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
1410 clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
1411 }
1412 }
1413}
1414
/**
 * mdss_mdp_idle_pc_restore() - Restore MDSS settings when exiting idle pc
 *
 * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
 * mode displays, referred to as MDSS idle power collapse. Upon subsequent
 * frame update, MDSS GDSC needs to turned back on and hw state needs to be
 * restored.
 *
 * Return: 0 on success (or when no idle pc happened), negative error
 * code when the IOMMU could not be attached.
 */
static int mdss_mdp_idle_pc_restore(void)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int rc = 0;

	mutex_lock(&mdp_fs_idle_pc_lock);
	if (!mdata->idle_pc) {
		pr_debug("no idle pc, no need to restore\n");
		goto end;
	}

	pr_debug("called from %pS\n", __builtin_return_address(0));
	rc = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)rc)) {
		pr_err("mdss iommu attach failed rc=%d\n", rc);
		goto end;
	}
	/* re-apply target specific register settings */
	mdss_hw_init(mdata);
	mdss_iommu_ctrl(0);

	/**
	 * sleep 10 microseconds to make sure AD auto-reinitialization
	 * is done
	 */
	udelay(10);
	mdss_mdp_memory_retention_exit();

	/* reprogram ctl/mixer state lost across the power collapse */
	mdss_mdp_ctl_restore(true);
	mdata->idle_pc = false;

end:
	mutex_unlock(&mdp_fs_idle_pc_lock);
	return rc;
}
1457
/**
 * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
 * @enable: value of enable or disable
 *
 * Function place bus bandwidth request to allocate saved bandwidth
 * if enabled or free bus bandwidth allocation if disabled.
 * Bus bandwidth is required by mdp. For dsi, it is only required to
 * send dcs commands. It returns error if bandwidth request fails.
 */
void mdss_bus_bandwidth_ctrl(int enable)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int changed = 0;

	mutex_lock(&mdata->bus_lock);
	if (enable) {
		if (mdata->bus_ref_cnt == 0)
			changed++;
		mdata->bus_ref_cnt++;
	} else {
		if (mdata->bus_ref_cnt) {
			mdata->bus_ref_cnt--;
			if (mdata->bus_ref_cnt == 0)
				changed++;
		} else {
			pr_err("Can not be turned off\n");
		}
	}

	pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
		__builtin_return_address(0), current->group_leader->comm,
		mdata->bus_ref_cnt, changed, enable);

	/* only update the bus driver on 0<->1 refcount transitions */
	if (changed) {
		MDSS_XLOG(mdata->bus_ref_cnt, enable);

		if (!enable) {
			/* keep the vote while splash handoff owns the bus */
			if (!mdata->handoff_pending) {
				msm_bus_scale_client_update_request(
					mdata->bus_hdl, 0);
				mdata->ao_bw_uc_idx = 0;
			}
			pm_runtime_mark_last_busy(&mdata->pdev->dev);
			pm_runtime_put_autosuspend(&mdata->pdev->dev);
		} else {
			pm_runtime_get_sync(&mdata->pdev->dev);
			msm_bus_scale_client_update_request(
				mdata->bus_hdl, mdata->curr_bw_uc_idx);
		}
	}

	mutex_unlock(&mdata->bus_lock);
}
EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
1512
1513void mdss_mdp_clk_ctrl(int enable)
1514{
1515 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1516 static int mdp_clk_cnt;
1517 unsigned long flags;
1518 int changed = 0;
1519 int rc = 0;
1520
1521 mutex_lock(&mdp_clk_lock);
1522 if (enable) {
1523 if (mdp_clk_cnt == 0)
1524 changed++;
1525 mdp_clk_cnt++;
1526 } else {
1527 if (mdp_clk_cnt) {
1528 mdp_clk_cnt--;
1529 if (mdp_clk_cnt == 0)
1530 changed++;
1531 } else {
1532 pr_err("Can not be turned off\n");
1533 }
1534 }
1535
1536 if (changed)
1537 MDSS_XLOG(mdp_clk_cnt, enable, current->pid);
1538
1539 pr_debug("%pS: task:%s clk_cnt=%d changed=%d enable=%d\n",
1540 __builtin_return_address(0), current->group_leader->comm,
1541 mdata->bus_ref_cnt, changed, enable);
1542
1543 if (changed) {
1544 if (enable) {
1545 pm_runtime_get_sync(&mdata->pdev->dev);
1546
1547 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1548 VOTE_INDEX_LOW);
1549
1550 rc = mdss_iommu_ctrl(1);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301551 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301552 pr_err("IOMMU attach failed\n");
1553
1554 /* Active+Sleep */
1555 msm_bus_scale_client_update_context(mdata->bus_hdl,
1556 false, mdata->curr_bw_uc_idx);
1557 }
1558
1559 spin_lock_irqsave(&mdp_lock, flags);
1560 mdata->clk_ena = enable;
1561 spin_unlock_irqrestore(&mdp_lock, flags);
1562
Abhijit Kulkarni6d68a7b2016-04-12 15:48:52 -07001563 mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, enable);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301564 mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
1565 mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
1566 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
1567 mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
1568 if (mdata->vsync_ena)
1569 mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
1570
1571 if (!enable) {
1572 /* release iommu control */
1573 mdss_iommu_ctrl(0);
1574
1575 /* Active-Only */
1576 msm_bus_scale_client_update_context(mdata->bus_hdl,
1577 true, mdata->ao_bw_uc_idx);
1578
1579 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1580 VOTE_INDEX_DISABLE);
1581
1582 pm_runtime_mark_last_busy(&mdata->pdev->dev);
1583 pm_runtime_put_autosuspend(&mdata->pdev->dev);
1584 }
1585 }
1586
1587 if (enable && changed)
1588 mdss_mdp_idle_pc_restore();
1589
1590 mutex_unlock(&mdp_clk_lock);
1591}
1592
1593static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
1594 char *clk_name, int clk_idx)
1595{
1596 struct clk *tmp;
1597
1598 if (clk_idx >= MDSS_MAX_CLK) {
1599 pr_err("invalid clk index %d\n", clk_idx);
1600 return -EINVAL;
1601 }
1602
1603 tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
1604 if (IS_ERR(tmp)) {
1605 pr_err("unable to get clk: %s\n", clk_name);
1606 return PTR_ERR(tmp);
1607 }
1608
1609 mdata->mdp_clk[clk_idx] = tmp;
1610 return 0;
1611}
1612
1613#define SEC_DEVICE_MDSS 1
1614
1615static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
1616{
1617 int ret, scm_ret = 0;
1618
1619 if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
1620 return;
1621
1622 pr_debug("restoring mdss secure config\n");
1623
1624 __mdss_mdp_reg_access_clk_enable(mdata, true);
1625
1626 ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
1627 if (ret || scm_ret)
1628 pr_warn("scm_restore_sec_cfg failed %d %d\n",
1629 ret, scm_ret);
1630
1631 __mdss_mdp_reg_access_clk_enable(mdata, false);
1632}
1633
1634static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
1635 unsigned long event, void *data)
1636{
1637 struct mdss_data_type *mdata;
1638
1639 mdata = container_of(self, struct mdss_data_type, gdsc_cb);
1640
1641 if (event & REGULATOR_EVENT_ENABLE) {
1642 /*
1643 * As SMMU in low tier targets is not power collapsible,
1644 * hence we don't need to restore sec configuration.
1645 */
1646 if (!mdss_mdp_req_init_restore_cfg(mdata))
1647 __mdss_restore_sec_cfg(mdata);
1648 } else if (event & REGULATOR_EVENT_PRE_DISABLE) {
1649 pr_debug("mdss gdsc is getting disabled\n");
1650 /* halt the vbif transactions */
1651 mdss_mdp_vbif_axi_halt(mdata);
1652 }
1653
1654 return NOTIFY_OK;
1655}
1656
/*
 * mdss_mdp_irq_clk_setup() - acquire IRQ, regulators and clocks for MDSS
 * @mdata: MDP private data
 *
 * Reads the max clock rate from DT, requests the MDSS interrupt (left
 * disabled until first use), grabs the GDSC/CX regulators, registers a
 * GDSC notifier, creates the register-bus vote client, registers all
 * MDSS clocks and programs the core clock to its maximum rate.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
{
	int ret;

	ret = of_property_read_u32(mdata->pdev->dev.of_node,
			"qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
	if (ret) {
		pr_err("failed to get max mdp clock rate\n");
		return ret;
	}

	pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);

	ret = devm_request_irq(&mdata->pdev->dev, mdss_mdp_hw.irq_info->irq,
			mdss_irq_handler, 0, "MDSS", mdata);
	if (ret) {
		pr_err("mdp request_irq() failed!\n");
		return ret;
	}
	/* keep the IRQ off until a display interface enables it */
	disable_irq(mdss_mdp_hw.irq_info->irq);

	mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
	if (IS_ERR_OR_NULL(mdata->fs)) {
		mdata->fs = NULL;
		pr_err("unable to get gdsc regulator\n");
		return -EINVAL;
	}

	/* venus GDSC is only present on some targets; absence is fine */
	mdata->venus = devm_regulator_get_optional(&mdata->pdev->dev,
		"gdsc-venus");
	if (IS_ERR_OR_NULL(mdata->venus)) {
		mdata->venus = NULL;
		pr_debug("unable to get venus gdsc regulator\n");
	}

	mdata->fs_ena = false;

	/* restore secure cfg / halt AXI around GDSC power transitions */
	mdata->gdsc_cb.notifier_call = mdss_mdp_gdsc_notifier_call;
	mdata->gdsc_cb.priority = 5;
	if (regulator_register_notifier(mdata->fs, &(mdata->gdsc_cb)))
		pr_warn("GDSC notification registration failed!\n");
	else
		mdata->regulator_notif_register = true;

	mdata->vdd_cx = devm_regulator_get_optional(&mdata->pdev->dev,
		"vdd-cx");
	if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
		pr_debug("unable to get CX reg. rc=%d\n",
					PTR_RET(mdata->vdd_cx));
		mdata->vdd_cx = NULL;
	}

	mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
	if (IS_ERR(mdata->reg_bus_clt)) {
		pr_err("bus client register failed\n");
		return PTR_ERR(mdata->reg_bus_clt);
	}

	/* bus/iface/core clocks are mandatory on every target */
	if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
	    mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
	    mdss_mdp_irq_clk_register(mdata, "core_clk",
				      MDSS_CLK_MDP_CORE))
		return -EINVAL;

	/* lut_clk is not present on all MDSS revisions */
	mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT);

	/* vsync_clk is optional for non-smart panels */
	mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC);

	/* this clk is not present on all MDSS revisions */
	mdss_mdp_irq_clk_register(mdata, "mnoc_clk", MDSS_CLK_MNOC_AHB);

	/* Setting the default clock rate to the max supported.*/
	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
	pr_debug("mdp clk rate=%ld\n",
		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));

	return 0;
}
1737
1738static void mdss_debug_enable_clock(int on)
1739{
1740 if (on)
1741 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
1742 else
1743 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
1744}
1745
1746static int mdss_mdp_debug_init(struct platform_device *pdev,
1747 struct mdss_data_type *mdata)
1748{
1749 int rc;
1750 struct mdss_debug_base *dbg_blk;
1751
1752 mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;
1753
1754 rc = mdss_debugfs_init(mdata);
1755 if (rc)
1756 return rc;
1757
1758 rc = mdss_mdp_debugfs_init(mdata);
1759 if (rc) {
1760 mdss_debugfs_remove(mdata);
1761 return rc;
1762 }
1763
1764 mdss_debug_register_io("mdp", &mdata->mdss_io, &dbg_blk);
1765 mdss_debug_register_dump_range(pdev, dbg_blk, "qcom,regs-dump-mdp",
1766 "qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");
1767
1768 if (mdata->vbif_io.base)
1769 mdss_debug_register_io("vbif", &mdata->vbif_io, NULL);
1770 if (mdata->vbif_nrt_io.base)
1771 mdss_debug_register_io("vbif_nrt", &mdata->vbif_nrt_io, NULL);
1772
1773 return 0;
1774}
1775
1776static u32 mdss_get_props(void)
1777{
1778 u32 props = 0;
1779 void __iomem *props_base = ioremap(0xFC4B8114, 4);
1780
1781 if (props_base) {
1782 props = readl_relaxed(props_base);
1783 iounmap(props_base);
1784 }
1785 return props;
1786}
1787
/*
 * Program the default prefill bandwidth factors used by the MDP perf
 * calculator. Targets using traffic-shaper based prefill
 * (MDSS_QOS_TS_PREFILL) additionally get the traffic-shaper parameters.
 */
void mdss_mdp_init_default_prefill_factors(struct mdss_data_type *mdata)
{
	/* per-format fetch factors: macrotile NV12 / macrotile / linear */
	mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor = 8;
	mdata->prefill_data.prefill_factors.fmt_mt_factor = 4;
	mdata->prefill_data.prefill_factors.fmt_linear_factor = 1;
	mdata->prefill_data.prefill_factors.scale_factor = 1;
	mdata->prefill_data.prefill_factors.xtra_ff_factor = 2;

	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
		mdata->prefill_data.ts_threshold = 25;
		mdata->prefill_data.ts_end = 8;
		mdata->prefill_data.ts_rate.numer = 1;
		mdata->prefill_data.ts_rate.denom = 4;
		mdata->prefill_data.ts_overhead = 2;
	}
}
1804
/*
 * mdss_mdp_hw_rev_caps_init() - set per-revision capabilities and quirks
 * @mdata: MDP private data
 *
 * Populates zorder limits, cursor sizes, prefill parameters, QoS and
 * capability bitmaps and quirk flags based on the detected MDP hardware
 * revision. Several cases fall through intentionally to share the
 * settings of the later revisions.
 */
static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
{

	mdata->per_pipe_ib_factor.numer = 0;
	mdata->per_pipe_ib_factor.denom = 0;
	mdata->apply_post_scale_bytes = true;
	mdata->hflip_buffer_reused = true;
	/* prevent disable of prefill calculations */
	mdata->min_prefill_lines = 0xffff;
	/* clock gating feature is disabled by default */
	mdata->enable_gate = false;
	mdata->pixel_ram_size = 0;
	mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;

	mdss_mdp_hw_rev_debug_caps_init(mdata);

	switch (mdata->mdp_rev) {
	case MDSS_MDP_HW_REV_107:
		mdss_set_quirk(mdata, MDSS_QUIRK_ROTCDP);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_1:
		mdss_mdp_format_flag_removal(invalid_mdp107_wb_output_fmts,
			ARRAY_SIZE(invalid_mdp107_wb_output_fmts),
			VALID_MDP_WB_INTF_FORMAT);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_2:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 21;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED);
		break;
	case MDSS_MDP_HW_REV_105:
	case MDSS_MDP_HW_REV_109:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		break;
	case MDSS_MDP_HW_REV_110:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdata->min_prefill_lines = 12;
		mdata->props = mdss_get_props();
		break;
	case MDSS_MDP_HW_REV_112:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
		mdata->min_prefill_lines = 12;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		break;
	case MDSS_MDP_HW_REV_114:
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		/* fall-through */
	case MDSS_MDP_HW_REV_116:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 40 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_115:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = false;
		mdata->pixel_ram_size = 16 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_MIXER_1_FOR_WB, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_300:
	case MDSS_MDP_HW_REV_301:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 384;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 25;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;

		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
		mdata->has_wb_ubwc = true;
		set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
		break;
	default:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
	}

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
		mdss_set_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG);

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 ||
			mdata->mdp_rev == MDSS_MDP_HW_REV_200)
		mdss_set_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN);
}
1967
/*
 * Read the MDP hardware revision register (once) and initialize the
 * revision-specific capabilities. Must run with MDP clocks enabled.
 */
static void mdss_hw_rev_init(struct mdss_data_type *mdata)
{
	/* revision already detected */
	if (mdata->mdp_rev)
		return;

	mdata->mdp_rev = MDSS_REG_READ(mdata, MDSS_REG_HW_VERSION);
	mdss_mdp_hw_rev_caps_init(mdata);
}
1976
1977/**
1978 * mdss_hw_init() - Initialize MDSS target specific register settings
1979 * @mdata: MDP private data
1980 *
1981 * Initialize basic MDSS hardware settings based on the board specific
1982 * parameters. This function does not explicitly turn on the MDP clocks
1983 * and so it must be called with the MDP clocks already enabled.
1984 */
1985void mdss_hw_init(struct mdss_data_type *mdata)
1986{
1987 struct mdss_mdp_pipe *vig;
1988
1989 mdss_hw_rev_init(mdata);
1990
1991 /* Disable hw underrun recovery only for older mdp reversions. */
1992 if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
1993 writel_relaxed(0x0, mdata->mdp_base +
1994 MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL);
1995
1996 if (mdata->hw_settings) {
1997 struct mdss_hw_settings *hws = mdata->hw_settings;
1998
1999 while (hws->reg) {
2000 writel_relaxed(hws->val, hws->reg);
2001 hws++;
2002 }
2003 }
2004
2005 vig = mdata->vig_pipes;
2006
2007 mdata->nmax_concurrent_ad_hw =
2008 (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
2009
2010 pr_debug("MDP hw init done\n");
2011}
2012
/*
 * mdss_mdp_res_init() - one-time initialization of MDSS resources
 * @mdata: MDP private data
 *
 * Sets up IRQ, clock and regulator resources, the histogram interrupt
 * state and the ION client. Returns -EPERM if called more than once.
 */
static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
{
	u32 rc = 0;

	if (mdata->res_init) {
		pr_err("mdss resources already initialized\n");
		return -EPERM;
	}

	mdata->res_init = true;
	mdata->clk_ena = false;
	mdss_mdp_hw.irq_info->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
	mdss_mdp_hw.irq_info->irq_ena = false;

	rc = mdss_mdp_irq_clk_setup(mdata);
	if (rc)
		return rc;

	mdata->hist_intr.req = 0;
	mdata->hist_intr.curr = 0;
	mdata->hist_intr.state = 0;
	spin_lock_init(&mdata->hist_intr.lock);

	/* ION client failure is non-fatal; users must check for NULL */
	mdata->iclient = msm_ion_client_create(mdata->pdev->name);
	if (IS_ERR_OR_NULL(mdata->iclient)) {
		pr_err("msm_ion_client_create() return error (%pK)\n",
				mdata->iclient);
		mdata->iclient = NULL;
	}

	return rc;
}
2045
2046static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
2047 struct device *dev)
2048{
2049 int ret;
2050 struct device_node *node;
2051 u32 prop_val;
2052
2053 if (!dev)
2054 return -EPERM;
2055
2056 node = of_get_child_by_name(dev->of_node, "qcom,mdss-scaler-offsets");
2057 if (!node)
2058 return 0;
2059
2060 if (mdata->scaler_off)
2061 return -EFAULT;
2062
2063 mdata->scaler_off = devm_kzalloc(&mdata->pdev->dev,
2064 sizeof(*mdata->scaler_off), GFP_KERNEL);
2065 if (!mdata->scaler_off)
2066 return -ENOMEM;
2067
2068 ret = of_property_read_u32(node,
2069 "qcom,mdss-vig-scaler-off",
2070 &prop_val);
2071 if (ret) {
2072 pr_err("read property %s failed ret %d\n",
2073 "qcom,mdss-vig-scaler-off", ret);
2074 return -EINVAL;
2075 }
2076 mdata->scaler_off->vig_scaler_off = prop_val;
2077 ret = of_property_read_u32(node,
2078 "qcom,mdss-vig-scaler-lut-off",
2079 &prop_val);
2080 if (ret) {
2081 pr_err("read property %s failed ret %d\n",
2082 "qcom,mdss-vig-scaler-lut-off", ret);
2083 return -EINVAL;
2084 }
2085 mdata->scaler_off->vig_scaler_lut_off = prop_val;
2086 mdata->scaler_off->has_dest_scaler =
2087 of_property_read_bool(mdata->pdev->dev.of_node,
2088 "qcom,mdss-has-dest-scaler");
2089 if (mdata->scaler_off->has_dest_scaler) {
2090 ret = of_property_read_u32(node,
2091 "qcom,mdss-dest-block-off",
2092 &prop_val);
2093 if (ret) {
2094 pr_err("read property %s failed ret %d\n",
2095 "qcom,mdss-dest-block-off", ret);
2096 return -EINVAL;
2097 }
2098 mdata->scaler_off->dest_base = mdata->mdss_io.base +
2099 prop_val;
2100 mdata->scaler_off->ndest_scalers =
2101 mdss_mdp_parse_dt_prop_len(mdata->pdev,
2102 "qcom,mdss-dest-scalers-off");
2103 mdata->scaler_off->dest_scaler_off =
2104 devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
2105 mdata->scaler_off->ndest_scalers,
2106 GFP_KERNEL);
Amine Najahie0f1e8e2018-04-10 12:17:15 -04002107 if (!mdata->scaler_off->dest_scaler_off)
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302108 return -ENOMEM;
Amine Najahie0f1e8e2018-04-10 12:17:15 -04002109
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302110 ret = mdss_mdp_parse_dt_handler(mdata->pdev,
2111 "qcom,mdss-dest-scaler-off",
2112 mdata->scaler_off->dest_scaler_off,
2113 mdata->scaler_off->ndest_scalers);
2114 if (ret)
2115 return -EINVAL;
2116 mdata->scaler_off->dest_scaler_lut_off =
2117 devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
2118 mdata->scaler_off->ndest_scalers,
2119 GFP_KERNEL);
Amine Najahie0f1e8e2018-04-10 12:17:15 -04002120 if (!mdata->scaler_off->dest_scaler_lut_off)
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302121 return -ENOMEM;
Amine Najahie0f1e8e2018-04-10 12:17:15 -04002122
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302123 ret = mdss_mdp_parse_dt_handler(mdata->pdev,
2124 "qcom,mdss-dest-scalers-lut-off",
2125 mdata->scaler_off->dest_scaler_lut_off,
2126 mdata->scaler_off->ndest_scalers);
2127 if (ret)
2128 return -EINVAL;
2129 }
2130
Animesh Kishore9bd358e2018-02-23 18:04:28 +05302131 mutex_init(&mdata->scaler_off->scaler_lock);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302132 return 0;
2133}
2134
2135/**
2136 * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
2137 * @on: 1 to start handoff, 0 to complete the handoff after first frame update
2138 *
2139 * MDSS Clocks and GDSC are already on during continuous splash screen, but
2140 * increasing ref count will keep clocks from being turned off until handoff
2141 * has properly happened after frame update.
2142 */
2143void mdss_mdp_footswitch_ctrl_splash(int on)
2144{
2145 int ret;
2146 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2147
2148 if (mdata != NULL) {
2149 if (on) {
2150 mdata->handoff_pending = true;
2151 pr_debug("Enable MDP FS for splash.\n");
2152 if (mdata->venus) {
2153 ret = regulator_enable(mdata->venus);
2154 if (ret)
2155 pr_err("venus failed to enable\n");
2156 }
2157
2158 ret = regulator_enable(mdata->fs);
2159 if (ret)
2160 pr_err("Footswitch failed to enable\n");
2161
2162 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2163 mdss_bus_bandwidth_ctrl(true);
2164 } else {
2165 pr_debug("Disable MDP FS for splash.\n");
2166 mdss_bus_bandwidth_ctrl(false);
2167 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2168 regulator_disable(mdata->fs);
2169 if (mdata->venus)
2170 regulator_disable(mdata->venus);
2171 mdata->handoff_pending = false;
2172 }
2173 } else {
2174 pr_warn("mdss mdata not initialized\n");
2175 }
2176}
2177
2178static int mdss_mdp_get_pan_intf(const char *pan_intf)
2179{
2180 int i, rc = MDSS_PANEL_INTF_INVALID;
2181
2182 if (!pan_intf)
2183 return rc;
2184
2185 for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
2186 if (!strcmp(pan_intf, pan_types[i].name)) {
2187 rc = pan_types[i].type;
2188 break;
2189 }
2190 }
2191 return rc;
2192}
2193
/*
 * mdss_mdp_get_pan_cfg() - parse the bootloader-provided panel string
 * @pan_cfg: panel configuration to fill in
 *
 * Parses the module parameter string mdss_mdp_panel, expected in the form
 * "<lk_cfg>:<intf>:<panel name>" where <lk_cfg> is '0' or '1' and <intf>
 * is an interface name of at most 4 characters (e.g. "dsi").
 *
 * Return: 0 on success, -EINVAL if the string is absent or malformed (the
 * caller then falls back to device tree configuration).
 */
static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
{
	char *t = NULL;
	char pan_intf_str[MDSS_MAX_PANEL_LEN];
	int rc, i, panel_len;
	char pan_name[MDSS_MAX_PANEL_LEN] = {'\0'};

	if (!pan_cfg)
		return -EINVAL;

	/* first char encodes whether LK (bootloader) configured the panel */
	if (mdss_mdp_panel[0] == '0') {
		pr_debug("panel name is not set\n");
		pan_cfg->lk_cfg = false;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	} else if (mdss_mdp_panel[0] == '1') {
		pan_cfg->lk_cfg = true;
	} else {
		/* read from dt */
		pan_cfg->lk_cfg = true;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* skip lk cfg and delimiter; ex: "1:" */
	strlcpy(pan_name, &mdss_mdp_panel[2], MDSS_MAX_PANEL_LEN);
	t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
	if (!t) {
		pr_err("pan_name=[%s] invalid\n", pan_name);
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* copy at most 4 chars of interface name up to the ':' delimiter */
	for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
		pan_intf_str[i] = *(pan_name + i);
	pan_intf_str[i] = 0;
	pr_debug("%d panel intf %s\n", __LINE__, pan_intf_str);
	/* point to the start of panel name */
	t = t + 1;
	strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
	pr_debug("%d: t=[%s] panel name=[%s]\n", __LINE__,
		t, pan_cfg->arg_cfg);

	panel_len = strlen(pan_cfg->arg_cfg);
	if (!panel_len) {
		pr_err("Panel name is invalid\n");
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* unknown interface names degrade to INVALID but still succeed */
	rc = mdss_mdp_get_pan_intf(pan_intf_str);
	pan_cfg->pan_intf = (rc < 0) ? MDSS_PANEL_INTF_INVALID : rc;
	return 0;
}
2248
2249static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
2250{
2251 int rc;
2252 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2253 const char *prim_intf = NULL;
2254
2255 rc = of_property_read_string(pdev->dev.of_node,
2256 "qcom,mdss-pref-prim-intf", &prim_intf);
2257 if (rc)
2258 return -ENODEV;
2259
2260 rc = mdss_mdp_get_pan_intf(prim_intf);
2261 if (rc < 0) {
2262 mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
2263 } else {
2264 mdata->pan_cfg.pan_intf = rc;
2265 rc = 0;
2266 }
2267 return rc;
2268}
2269
2270static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
2271{
2272 int rc, len = 0;
2273 int *intf_type;
2274 char *panel_name;
2275 struct mdss_panel_cfg *pan_cfg;
2276 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2277
2278 mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
2279 pan_cfg = &mdata->pan_cfg;
2280 panel_name = &pan_cfg->arg_cfg[0];
2281 intf_type = &pan_cfg->pan_intf;
2282
2283 /* reads from dt by default */
2284 pan_cfg->lk_cfg = true;
2285
2286 len = strlen(mdss_mdp_panel);
2287
2288 if (len > 0) {
2289 rc = mdss_mdp_get_pan_cfg(pan_cfg);
2290 if (!rc) {
2291 pan_cfg->init_done = true;
2292 return rc;
2293 }
2294 }
2295
2296 rc = mdss_mdp_parse_dt_pan_intf(pdev);
2297 /* if pref pan intf is not present */
2298 if (rc)
2299 pr_warn("unable to parse device tree for pan intf\n");
2300
2301 pan_cfg->init_done = true;
2302
2303 return 0;
2304}
2305
2306static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
2307 int pipe_cnt, char *type, char *buf, int *cnt)
2308{
2309 int i;
2310 int j;
2311 size_t len = PAGE_SIZE;
2312 int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
2313
2314#define SPRINT(fmt, ...) \
2315 (*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
2316
2317 for (i = 0; i < pipe_cnt && pipe; i++) {
2318 SPRINT("pipe_num:%d pipe_type:%s pipe_ndx:%d rects:%d pipe_is_handoff:%d display_id:%d ",
2319 pipe->num, type, pipe->ndx, pipe->multirect.max_rects,
2320 pipe->is_handed_off, mdss_mdp_get_display_id(pipe));
2321 SPRINT("fmts_supported:");
2322 for (j = 0; j < num_bytes; j++)
2323 SPRINT("%d,", pipe->supported_formats[j]);
2324 SPRINT("\n");
2325 pipe += pipe->multirect.max_rects;
2326 }
2327#undef SPRINT
2328}
2329
2330static void mdss_mdp_update_sspp_info(struct mdss_data_type *mdata,
2331 char *buf, int *cnt)
2332{
2333 __update_sspp_info(mdata->vig_pipes, mdata->nvig_pipes,
2334 "vig", buf, cnt);
2335 __update_sspp_info(mdata->rgb_pipes, mdata->nrgb_pipes,
2336 "rgb", buf, cnt);
2337 __update_sspp_info(mdata->dma_pipes, mdata->ndma_pipes,
2338 "dma", buf, cnt);
2339 __update_sspp_info(mdata->cursor_pipes, mdata->ncursor_pipes,
2340 "cursor", buf, cnt);
2341}
2342
/*
 * Append rotator/writeback format lists to the caps buffer.
 *
 * NOTE(review): "rot_output_fmts" is printed from supported_input_formats,
 * identical to "rot_input_fmts". Confirm whether the rotator's output
 * format list is intentionally the same as its input list or whether this
 * should read supported_output_formats.
 */
static void mdss_mdp_update_wb_info(struct mdss_data_type *mdata,
	char *buf, int *cnt)
{
#define SPRINT(fmt, ...) \
	(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
	size_t len = PAGE_SIZE;
	int i;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

	/* each loop emits nothing when no writeback block was parsed */
	SPRINT("rot_input_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nrot_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nwb_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_output_formats[i]);
	SPRINT("\n");
#undef SPRINT
}
2364
/*
 * mdss_mdp_show_capabilities() - sysfs "caps" read handler
 * @dev: MDP platform device
 * @attr: sysfs attribute (unused)
 * @buf: PAGE_SIZE output buffer
 *
 * Emits the MDP hardware capability list consumed by userspace (HWC).
 * The key=value line format is an ABI; do not reorder or rename entries.
 *
 * Return: number of characters written to @buf.
 */
ssize_t mdss_mdp_show_capabilities(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	size_t len = PAGE_SIZE;
	int cnt = 0;

#define SPRINT(fmt, ...) \
	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	SPRINT("mdp_version=5\n");
	SPRINT("hw_rev=%d\n", mdata->mdp_rev);
	SPRINT("pipe_count:%d\n", mdata->nvig_pipes + mdata->nrgb_pipes +
		mdata->ndma_pipes + mdata->ncursor_pipes);
	mdss_mdp_update_sspp_info(mdata, buf, &cnt);
	mdss_mdp_update_wb_info(mdata, buf, &cnt);
	/* TODO : need to remove num pipes info */
	SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
	SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
	SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
	SPRINT("blending_stages=%d\n", mdata->max_target_zorder);
	SPRINT("cursor_pipes=%d\n", mdata->ncursor_pipes);
	SPRINT("max_cursor_size=%d\n", mdata->max_cursor_size);
	SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
	SPRINT("smp_size=%d\n", mdata->smp_mb_size);
	SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
	SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
	SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);

	if (mdata->nwb)
		SPRINT("wb_intf_index=%d\n", mdata->nwb - 1);

	/* prefill factors only apply on targets with the simplified model */
	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("fmt_mt_nv12_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor);
		SPRINT("fmt_mt_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_factor);
		SPRINT("fmt_linear_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_linear_factor);
		SPRINT("scale_factor=%d\n",
			mdata->prefill_data.prefill_factors.scale_factor);
		SPRINT("xtra_ff_factor=%d\n",
			mdata->prefill_data.prefill_factors.xtra_ff_factor);
	}

	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("amortizable_threshold=%d\n",
			mdata->prefill_data.ts_threshold);
		SPRINT("system_overhead_lines=%d\n",
			mdata->prefill_data.ts_overhead);
	}

	/* optional limits: only printed when non-zero/configured */
	if (mdata->props)
		SPRINT("props=%d\n", mdata->props);
	if (mdata->max_bw_low)
		SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
	if (mdata->max_bw_high)
		SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
	if (mdata->max_pipe_width)
		SPRINT("max_pipe_width=%d\n", mdata->max_pipe_width);
	if (mdata->max_mixer_width)
		SPRINT("max_mixer_width=%d\n", mdata->max_mixer_width);
	if (mdata->max_bw_per_pipe)
		SPRINT("max_pipe_bw=%u\n", mdata->max_bw_per_pipe);
	if (mdata->max_mdp_clk_rate)
		SPRINT("max_mdp_clk=%u\n", mdata->max_mdp_clk_rate);
	if (mdata->clk_factor.numer)
		SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
			mdata->clk_factor.denom);
	if (mdata->has_rot_dwnscale) {
		if (mdata->rot_dwnscale_min)
			SPRINT("rot_dwnscale_min=%u\n",
				mdata->rot_dwnscale_min);
		if (mdata->rot_dwnscale_max)
			SPRINT("rot_dwnscale_max=%u\n",
				mdata->rot_dwnscale_max);
	}
	/* space-separated feature flag list */
	SPRINT("features=");
	if (mdata->has_bwc)
		SPRINT(" bwc");
	if (mdata->has_ubwc)
		SPRINT(" ubwc");
	if (mdata->has_wb_ubwc)
		SPRINT(" wb_ubwc");
	if (mdata->has_decimation)
		SPRINT(" decimation");
	if (mdata->highest_bank_bit && !mdss_mdp_is_ubwc_supported(mdata))
		SPRINT(" tile_format");
	if (mdata->has_non_scalar_rgb)
		SPRINT(" non_scalar_rgb");
	if (mdata->has_src_split)
		SPRINT(" src_split");
	if (mdata->has_rot_dwnscale)
		SPRINT(" rotator_downscale");
	if (mdata->max_bw_settings_cnt)
		SPRINT(" dynamic_bw_limit");
	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		SPRINT(" qseed3");
	if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map))
		SPRINT(" dest_scaler");
	if (mdata->has_separate_rotator)
		SPRINT(" separate_rotator");
	if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
		SPRINT(" hdr");
	SPRINT("\n");
#undef SPRINT

	return cnt;
}
2474
2475static ssize_t mdss_mdp_read_max_limit_bw(struct device *dev,
2476 struct device_attribute *attr, char *buf)
2477{
2478 struct mdss_data_type *mdata = dev_get_drvdata(dev);
2479 size_t len = PAGE_SIZE;
2480 u32 cnt = 0;
2481 int i;
2482
2483 char bw_names[4][8] = {"default", "camera", "hflip", "vflip"};
2484 char pipe_bw_names[4][16] = {"default_pipe", "camera_pipe",
2485 "hflip_pipe", "vflip_pipe"};
2486 struct mdss_max_bw_settings *bw_settings;
2487 struct mdss_max_bw_settings *pipe_bw_settings;
2488
2489 bw_settings = mdata->max_bw_settings;
2490 pipe_bw_settings = mdata->max_per_pipe_bw_settings;
2491
2492#define SPRINT(fmt, ...) \
2493 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2494
2495 SPRINT("bw_mode_bitmap=%d\n", mdata->bw_mode_bitmap);
2496 SPRINT("bw_limit_pending=%d\n", mdata->bw_limit_pending);
2497
2498 for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
2499 SPRINT("%s=%d\n", bw_names[i], bw_settings->mdss_max_bw_val);
2500 bw_settings++;
2501 }
2502
2503 for (i = 0; i < mdata->mdss_per_pipe_bw_cnt; i++) {
2504 SPRINT("%s=%d\n", pipe_bw_names[i],
2505 pipe_bw_settings->mdss_max_bw_val);
2506 pipe_bw_settings++;
2507 }
2508
2509 return cnt;
2510}
2511
2512static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
2513 struct device_attribute *attr, const char *buf, size_t len)
2514{
2515 struct mdss_data_type *mdata = dev_get_drvdata(dev);
2516 u32 data = 0;
2517
2518 if (kstrtouint(buf, 0, &data)) {
2519 pr_info("Not able scan to bw_mode_bitmap\n");
2520 } else {
2521 mdata->bw_mode_bitmap = data;
2522 mdata->bw_limit_pending = true;
2523 pr_debug("limit use case, bw_mode_bitmap = %d\n", data);
2524 }
2525
2526 return len;
2527}
2528
/* Read-only capability dump and a RW bandwidth-limit bitmap for userspace. */
static DEVICE_ATTR(caps, 0444, mdss_mdp_show_capabilities, NULL);
static DEVICE_ATTR(bw_mode_bitmap, 0664,
	mdss_mdp_read_max_limit_bw, mdss_mdp_store_max_limit_bw);

/* Attributes exported under the MDP platform device's sysfs directory. */
static struct attribute *mdp_fs_attrs[] = {
	&dev_attr_caps.attr,
	&dev_attr_bw_mode_bitmap.attr,
	NULL
};

static struct attribute_group mdp_fs_attr_group = {
	.attrs = mdp_fs_attrs
};
2542
2543static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
2544{
2545 struct device *dev = &mdata->pdev->dev;
2546 int rc;
2547
2548 rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);
2549
2550 return rc;
2551}
2552
2553int mdss_panel_get_intf_status(u32 disp_num, u32 intf_type)
2554{
2555 int rc, intf_status = 0;
2556 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2557
2558 if (!mdss_res || !mdss_res->pan_cfg.init_done)
2559 return -EPROBE_DEFER;
2560
2561 if (mdss_res->handoff_pending) {
2562 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2563 intf_status = readl_relaxed(mdata->mdp_base +
2564 MDSS_MDP_REG_DISP_INTF_SEL);
2565 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2566 if (intf_type == MDSS_PANEL_INTF_DSI) {
2567 if (disp_num == DISPLAY_1)
2568 rc = (intf_status & MDSS_MDP_INTF_DSI0_SEL);
2569 else if (disp_num == DISPLAY_2)
2570 rc = (intf_status & MDSS_MDP_INTF_DSI1_SEL);
2571 else
2572 rc = 0;
2573 } else if (intf_type == MDSS_PANEL_INTF_EDP) {
2574 intf_status &= MDSS_MDP_INTF_EDP_SEL;
2575 rc = (intf_status == MDSS_MDP_INTF_EDP_SEL);
2576 } else if (intf_type == MDSS_PANEL_INTF_HDMI) {
2577 intf_status &= MDSS_MDP_INTF_HDMI_SEL;
2578 rc = (intf_status == MDSS_MDP_INTF_HDMI_SEL);
2579 } else {
2580 rc = 0;
2581 }
2582 } else {
2583 rc = 0;
2584 }
2585
2586 return rc;
2587}
2588
/*
 * mdss_mdp_probe() - MDP platform driver probe
 * @pdev: MDP platform device (device tree based only)
 *
 * Maps register spaces, sets up IRQs, clocks, bus scaling and runtime PM,
 * parses the device tree, registers sysfs/framebuffer/SMMU hooks and
 * detects whether the bootloader left one or more displays running (in
 * which case clock/regulator votes are kept until splash handoff).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mdss_mdp_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;
	struct mdss_data_type *mdata;
	uint32_t intf_sel = 0;
	uint32_t split_display = 0;
	int num_of_display_on = 0;
	int i = 0;

	if (!pdev->dev.of_node) {
		pr_err("MDP driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	/* only a single MDP instance is supported */
	if (mdss_res) {
		pr_err("MDP already initialized\n");
		return -EINVAL;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	pdev->id = 0;
	mdata->pdev = pdev;
	platform_set_drvdata(pdev, mdata);
	mdss_res = mdata;
	mutex_init(&mdata->reg_lock);
	mutex_init(&mdata->reg_bus_lock);
	mutex_init(&mdata->bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);
	atomic_set(&mdata->sd_client_count, 0);
	atomic_set(&mdata->active_intf_cnt, 0);

	mdss_res->mdss_util = mdss_get_util_intf();
	if (mdss_res->mdss_util == NULL) {
		pr_err("Failed to get mdss utility functions\n");
		return -ENODEV;
	}

	/* publish helpers used by sibling drivers (DSI, HDMI, rotator) */
	mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
	mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
	mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
	mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
	mdss_res->mdss_util->bus_bandwidth_ctrl = mdss_bus_bandwidth_ctrl;
	mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type;
	mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status;

	rc = msm_mdss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys");
	if (rc) {
		pr_err("unable to map MDP base\n");
		goto probe_done;
	}
	pr_debug("MDSS HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->mdss_io.base,
		mdata->mdss_io.len);

	rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys");
	if (rc) {
		pr_err("unable to map MDSS VBIF base\n");
		goto probe_done;
	}
	pr_debug("MDSS VBIF HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->vbif_io.base,
		mdata->vbif_io.len);

	/* non-realtime VBIF is optional on some targets */
	rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_nrt_io,
		"vbif_nrt_phys");
	if (rc)
		pr_debug("unable to map MDSS VBIF non-realtime base\n");
	else
		pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	/*
	 * NOTE(review): these allocation failures return directly instead of
	 * going through probe_done cleanup, leaving mdss_res set — confirm
	 * whether that is acceptable on this (practically fatal) path.
	 */
	mdss_mdp_hw.irq_info = kcalloc(1, sizeof(struct irq_info), GFP_KERNEL);
	if (!mdss_mdp_hw.irq_info)
		return -ENOMEM;

	mdss_mdp_hw.irq_info->irq = res->start;
	mdss_mdp_hw.ptr = mdata;

	/* export misc. interrupts to external driver */
	mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
		&mdss_irq_domain_ops, mdata);
	if (!mdata->irq_domain) {
		pr_err("unable to add linear domain\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdss_misc_hw.irq_info = mdss_intr_line();
	rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_mdp_res_init(mdata);
	if (rc) {
		pr_err("unable to initialize mdss mdp resources\n");
		goto probe_done;
	}

	/* runtime PM with autosuspend only when idle power collapse is on */
	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
	if (mdata->idle_pc_enabled)
		pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		mdss_mdp_footswitch_ctrl(mdata, true);

	rc = mdss_mdp_bus_scale_register(mdata);
	if (rc) {
		pr_err("unable to register bus scaling\n");
		goto probe_done;
	}

	/*
	 * enable clocks and read mdp_rev as soon as possible once
	 * kernel is up.
	 */
	mdss_mdp_footswitch_ctrl_splash(true);
	mdss_hw_rev_init(mdata);

	/*populate hw iomem base info from device tree*/
	rc = mdss_mdp_parse_dt(pdev);
	if (rc) {
		pr_err("unable to parse device tree\n");
		goto probe_done;
	}

	rc = mdss_mdp_get_cmdline_config(pdev);
	if (rc) {
		pr_err("Error in panel override:rc=[%d]\n", rc);
		goto probe_done;
	}

	rc = mdss_mdp_debug_init(pdev, mdata);
	if (rc) {
		pr_err("unable to initialize mdp debugging\n");
		goto probe_done;
	}
	rc = mdss_mdp_scaler_init(mdata, &pdev->dev);
	if (rc)
		goto probe_done;

	/* the following registration failures are logged but non-fatal */
	rc = mdss_mdp_register_sysfs(mdata);
	if (rc)
		pr_err("unable to register mdp sysfs nodes\n");

	rc = mdss_fb_register_mdp_instance(&mdp5);
	if (rc)
		pr_err("unable to register mdp instance\n");

	rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_smmu_init(mdata, &pdev->dev);
	if (rc)
		pr_err("mdss smmu init failed\n");

	mdss_mdp_set_supported_formats(mdata);

	mdss_res->mdss_util->mdp_probe_done = true;

	mdss_hw_init(mdata);

	rc = mdss_mdp_pp_init(&pdev->dev);
	if (rc)
		pr_err("unable to initialize mdss pp resources\n");

	/* Restoring Secure configuration during boot-up */
	if (mdss_mdp_req_init_restore_cfg(mdata))
		__mdss_restore_sec_cfg(mdata);

	/* save reset values so BWC panic LUTs can be restored later */
	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
		mdata->default_panic_lut0 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT0);
		mdata->default_panic_lut1 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT1);
		mdata->default_robust_lut = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_ROBUST_LUT);
	}

	/*
	 * Read the DISP_INTF_SEL register to check if display was enabled in
	 * bootloader or not. If yes, let handoff handle removing the extra
	 * clk/regulator votes else turn off clk/regulators because purpose
	 * here is to get mdp_rev.
	 */
	intf_sel = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	split_display = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
	mdata->splash_intf_sel = intf_sel;
	mdata->splash_split_disp = split_display;

	if (intf_sel != 0) {
		/* one byte of INTF_SEL per hardware interface */
		for (i = 0; i < 4; i++)
			if ((intf_sel >> i*8) & 0x000000FF)
				num_of_display_on++;

		/*
		 * For split display enabled - DSI0, DSI1 interfaces are
		 * considered as single display. So decrement
		 * 'num_of_display_on' by 1
		 */
		if (split_display)
			num_of_display_on--;
	}
	if (!num_of_display_on) {
		mdss_mdp_footswitch_ctrl_splash(false);
		msm_bus_scale_client_update_request(
				mdata->bus_hdl, 0);
		mdata->ao_bw_uc_idx = 0;
	} else {
		mdata->handoff_pending = true;
		/*
		 * If multiple displays are enabled in LK, ctrl_splash off will
		 * be called multiple times during splash_cleanup. Need to
		 * enable it symmetrically
		 */
		for (i = 1; i < num_of_display_on; i++)
			mdss_mdp_footswitch_ctrl_splash(true);
	}

	/* NOTE(review): same direct-return concern as irq_info above */
	mdp_intr_cb = kcalloc(ARRAY_SIZE(mdp_irq_map),
			sizeof(struct intr_callback), GFP_KERNEL);
	if (mdp_intr_cb == NULL)
		return -ENOMEM;

	mdss_res->mdp_irq_mask = kcalloc(ARRAY_SIZE(mdp_intr_reg),
			sizeof(u32), GFP_KERNEL);
	if (mdss_res->mdp_irq_mask == NULL)
		return -ENOMEM;

	pr_info("mdss version = 0x%x, bootloader display is %s, num %d, intf_sel=0x%08x\n",
		mdata->mdp_rev, num_of_display_on ? "on" : "off",
		num_of_display_on, intf_sel);

probe_done:
	if (IS_ERR_VALUE((unsigned long)rc)) {
		/* unwind in reverse order; devm handles the allocations */
		if (!num_of_display_on)
			mdss_mdp_footswitch_ctrl_splash(false);

		if (mdata->regulator_notif_register)
			regulator_unregister_notifier(mdata->fs,
					&(mdata->gdsc_cb));
		mdss_mdp_hw.ptr = NULL;
		mdss_mdp_pp_term(&pdev->dev);
		mutex_destroy(&mdata->reg_lock);
		mdss_res = NULL;
	}

	return rc;
}
2852
Sachin Bhayare5076e252018-01-18 14:56:45 +05302853static void mdss_mdp_parse_dt_regs_array(const u32 *arr,
2854 struct mdss_io_data *io, struct mdss_hw_settings *hws, int count)
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302855{
2856 u32 len, reg;
2857 int i;
2858
2859 if (!arr)
2860 return;
2861
2862 for (i = 0, len = count * 2; i < len; i += 2) {
2863 reg = be32_to_cpu(arr[i]);
2864 if (reg >= io->len)
2865 continue;
2866
2867 hws->reg = io->base + reg;
2868 hws->val = be32_to_cpu(arr[i + 1]);
2869 pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
2870 hws++;
2871 }
2872}
2873
2874int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
2875{
2876 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2877 struct mdss_hw_settings *hws;
2878 const u32 *vbif_arr, *mdp_arr, *vbif_nrt_arr;
2879 int vbif_len, mdp_len, vbif_nrt_len;
2880
2881 vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
2882 &vbif_len);
2883 if (!vbif_arr || (vbif_len & 1)) {
2884 pr_debug("MDSS VBIF settings not found\n");
2885 vbif_len = 0;
2886 }
2887 vbif_len /= 2 * sizeof(u32);
2888
2889 vbif_nrt_arr = of_get_property(pdev->dev.of_node,
2890 "qcom,vbif-nrt-settings", &vbif_nrt_len);
2891 if (!vbif_nrt_arr || (vbif_nrt_len & 1)) {
2892 pr_debug("MDSS VBIF non-realtime settings not found\n");
2893 vbif_nrt_len = 0;
2894 }
2895 vbif_nrt_len /= 2 * sizeof(u32);
2896
2897 mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
2898 &mdp_len);
2899 if (!mdp_arr || (mdp_len & 1)) {
2900 pr_debug("MDSS MDP settings not found\n");
2901 mdp_len = 0;
2902 }
2903 mdp_len /= 2 * sizeof(u32);
2904
2905 if (!(mdp_len + vbif_len + vbif_nrt_len))
2906 return 0;
2907
2908 hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len +
2909 vbif_nrt_len + 1), GFP_KERNEL);
2910 if (!hws)
2911 return -ENOMEM;
2912
2913 mdss_mdp_parse_dt_regs_array(vbif_arr, &mdata->vbif_io,
2914 hws, vbif_len);
2915 mdss_mdp_parse_dt_regs_array(vbif_nrt_arr, &mdata->vbif_nrt_io,
2916 hws, vbif_nrt_len);
2917 mdss_mdp_parse_dt_regs_array(mdp_arr, &mdata->mdss_io,
2918 hws + vbif_len, mdp_len);
2919
2920 mdata->hw_settings = hws;
2921
2922 return 0;
2923}
2924
2925static int mdss_mdp_parse_dt(struct platform_device *pdev)
2926{
2927 int rc, data;
2928 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2929
2930 rc = mdss_mdp_parse_dt_hw_settings(pdev);
2931 if (rc) {
2932 pr_err("Error in device tree : hw settings\n");
2933 return rc;
2934 }
2935
2936 rc = mdss_mdp_parse_dt_pipe(pdev);
2937 if (rc) {
2938 pr_err("Error in device tree : pipes\n");
2939 return rc;
2940 }
2941
2942 rc = mdss_mdp_parse_dt_mixer(pdev);
2943 if (rc) {
2944 pr_err("Error in device tree : mixers\n");
2945 return rc;
2946 }
2947
2948 rc = mdss_mdp_parse_dt_misc(pdev);
2949 if (rc) {
2950 pr_err("Error in device tree : misc\n");
2951 return rc;
2952 }
2953
2954 rc = mdss_mdp_parse_dt_wb(pdev);
2955 if (rc) {
2956 pr_err("Error in device tree : wb\n");
2957 return rc;
2958 }
2959
2960 rc = mdss_mdp_parse_dt_ctl(pdev);
2961 if (rc) {
2962 pr_err("Error in device tree : ctl\n");
2963 return rc;
2964 }
2965
2966 rc = mdss_mdp_parse_dt_video_intf(pdev);
2967 if (rc) {
2968 pr_err("Error in device tree : ctl\n");
2969 return rc;
2970 }
2971
2972 rc = mdss_mdp_parse_dt_smp(pdev);
2973 if (rc) {
2974 pr_err("Error in device tree : smp\n");
2975 return rc;
2976 }
2977
2978 rc = mdss_mdp_parse_dt_prefill(pdev);
2979 if (rc) {
2980 pr_err("Error in device tree : prefill\n");
2981 return rc;
2982 }
2983
2984 rc = mdss_mdp_parse_dt_ad_cfg(pdev);
2985 if (rc) {
2986 pr_err("Error in device tree : ad\n");
2987 return rc;
2988 }
2989
2990 rc = mdss_mdp_parse_dt_cdm(pdev);
2991 if (rc)
2992 pr_debug("CDM offset not found in device tree\n");
2993
2994 rc = mdss_mdp_parse_dt_dsc(pdev);
2995 if (rc)
2996 pr_debug("DSC offset not found in device tree\n");
2997
2998 /* Parse the mdp specific register base offset*/
2999 rc = of_property_read_u32(pdev->dev.of_node,
3000 "qcom,mdss-mdp-reg-offset", &data);
3001 if (rc) {
3002 pr_err("Error in device tree : mdp reg base\n");
3003 return rc;
3004 }
3005 mdata->mdp_base = mdata->mdss_io.base + data;
3006 return 0;
3007}
3008
3009static void mdss_mdp_parse_dt_pipe_sw_reset(struct platform_device *pdev,
3010 u32 reg_off, char *prop_name, struct mdss_mdp_pipe *pipe_list,
3011 u32 npipes)
3012{
3013 int len;
3014 const u32 *arr;
3015
3016 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3017 if (arr) {
3018 int i;
3019
3020 len /= sizeof(u32);
3021 if (len != npipes) {
3022 pr_err("%s: invalid sw_reset entries req:%d found:%d\n",
3023 prop_name, len, npipes);
3024 return;
3025 }
3026
3027 for (i = 0; i < len; i++) {
3028 pipe_list[i].sw_reset.reg_off = reg_off;
3029 pipe_list[i].sw_reset.bit_off = be32_to_cpu(arr[i]);
3030
3031 pr_debug("%s[%d]: sw_reset: reg_off:0x%x bit_off:%d\n",
3032 prop_name, i, reg_off, be32_to_cpu(arr[i]));
3033 }
3034 }
3035}
3036
/*
 * mdss_mdp_parse_dt_pipe_clk_ctrl() - parse per-pipe clock control info
 * @pdev: MDP platform device
 * @prop_name: mandatory DT property containing one
 *	<ctrl_reg_off ctrl_bit_off status_bit_off> triplet per pipe
 * @pipe_list: pipes to annotate
 * @npipes: number of pipes expected
 *
 * Return: 0 on success, -EINVAL if the property is missing or the entry
 * count does not match @npipes (in which case all parsed entries are
 * cleared again so no pipe keeps half-initialized data).
 */
static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
{
	int rc = 0, len;
	const u32 *arr;

	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
	if (arr) {
		int i, j;

		len /= sizeof(u32);
		/* i walks u32 cells (3 per entry), j walks pipes */
		for (i = 0, j = 0; i < len; j++) {
			struct mdss_mdp_pipe *pipe = NULL;

			if (j >= npipes) {
				pr_err("invalid clk ctrl enries for prop: %s\n",
						prop_name);
				return -EINVAL;
			}

			pipe = &pipe_list[j];

			pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
			pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);

			/* status register is next in line to ctrl register */
			pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
			pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);

			pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
					prop_name, j, pipe->clk_ctrl.reg_off,
					pipe->clk_ctrl.bit_off);
			pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
					prop_name, j, pipe->clk_status.reg_off,
					pipe->clk_status.bit_off);
		}
		if (j != npipes) {
			/* partial data: wipe everything parsed so far */
			pr_err("%s: %d entries found. required %d\n",
					prop_name, j, npipes);
			for (i = 0; i < npipes; i++) {
				memset(&pipe_list[i].clk_ctrl, 0,
						sizeof(pipe_list[i].clk_ctrl));
				memset(&pipe_list[i].clk_status, 0,
						sizeof(pipe_list[i].clk_status));
			}
			rc = -EINVAL;
		}
	} else {
		pr_err("error mandatory property '%s' not found\n", prop_name);
		rc = -EINVAL;
	}

	return rc;
}
3091
3092static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
3093 char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
3094{
3095 int i, j;
3096 int len;
3097 const u32 *arr;
3098 struct mdss_mdp_pipe *pipe = NULL;
3099
3100 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3101 if (arr) {
3102 len /= sizeof(u32);
3103 for (i = 0, j = 0; i < len; j++) {
3104 if (j >= npipes) {
3105 pr_err("invalid panic ctrl enries for prop: %s\n",
3106 prop_name);
3107 return;
3108 }
3109
3110 pipe = &pipe_list[j];
3111 pipe->panic_ctrl_ndx = be32_to_cpu(arr[i++]);
3112 }
3113 if (j != npipes)
3114 pr_err("%s: %d entries found. required %d\n",
3115 prop_name, j, npipes);
3116 } else {
3117 pr_debug("panic ctrl enabled but property '%s' not found\n",
3118 prop_name);
3119 }
3120}
3121
/*
 * mdss_mdp_parse_dt_pipe_helper() - parse DT configuration for one pipe type
 * @pdev:          MDSS platform device
 * @ptype:         pipe type being parsed (VIG/RGB/DMA/CURSOR)
 * @ptypestr:      pipe type name used to build the DT property names
 * @out_plist:     on success, set to the newly allocated pipe array
 * @len:           number of pipes of this type expected from the DT
 * @priority_base: priority offset handed to mdss_mdp_pipe_addr_setup()
 *
 * Builds the "qcom,mdss-pipe-<type>-*" property names, reads the fetch
 * ids, xin ids and register offsets for every hardware pipe of @ptype,
 * and sets up the pipe address table and clock control offsets.
 *
 * Return: number of pipes set up on success (0 when none of this type
 * exist), negative errno on parse or allocation failure.
 */
static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
		u32 ptype, char *ptypestr,
		struct mdss_mdp_pipe **out_plist,
		size_t len,
		u8 priority_base)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 offsets[MDSS_MDP_MAX_SSPP];
	u32 ftch_id[MDSS_MDP_MAX_SSPP];
	u32 xin_id[MDSS_MDP_MAX_SSPP];
	u32 pnums[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_pipe *pipe_list;
	char prop_name[64];
	int i, cnt, rc;
	u32 rects_per_sspp;

	if (!out_plist)
		return -EINVAL;

	/* collect the hardware pipe numbers that belong to this type */
	for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
		if (ptype == get_pipe_type_from_num(i)) {
			pnums[cnt] = i;
			cnt++;
		}
	}

	if (cnt < len)
		pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
				ptypestr, len, cnt);
	if (cnt == 0) {
		/* no pipes of this type: not an error, just nothing to do */
		*out_plist = NULL;

		return 0;
	}

	/* by default works in single rect mode unless otherwise noted */
	rects_per_sspp = mdata->rects_per_sspp[ptype] ? : 1;

	pipe_list = devm_kzalloc(&pdev->dev,
			(sizeof(struct mdss_mdp_pipe) * cnt * rects_per_sspp),
			GFP_KERNEL);
	if (!pipe_list)
		return -ENOMEM;

	/* SMP fetch ids do not apply to pixel-RAM targets or cursor pipes */
	if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
		for (i = 0; i < cnt; i++)
			ftch_id[i] = -1;
	} else {
		snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-fetch-id", ptypestr);
		rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
				cnt);
		if (rc)
			goto parse_fail;
	}

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-xin-id", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-off", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
	if (rc)
		goto parse_fail;

	rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
			xin_id, ptype, pnums, cnt, rects_per_sspp,
			priority_base);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
			pipe_list, cnt);
	if (rc)
		goto parse_fail;

	*out_plist = pipe_list;

	return cnt;
parse_fail:
	/* release the managed allocation early; probe may retry other types */
	devm_kfree(&pdev->dev, pipe_list);

	return rc;
}
3211
3212static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
3213{
3214 int rc = 0;
3215 u32 nfids = 0, len, nxids = 0, npipes = 0;
3216 u32 sw_reset_offset = 0;
3217 u32 data[4];
3218
3219 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3220
3221 mdata->has_pixel_ram = !mdss_mdp_parse_dt_prop_len(pdev,
3222 "qcom,mdss-smp-data");
3223
3224 mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3225 "qcom,mdss-pipe-vig-off");
3226 mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3227 "qcom,mdss-pipe-rgb-off");
3228 mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3229 "qcom,mdss-pipe-dma-off");
3230 mdata->ncursor_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3231 "qcom,mdss-pipe-cursor-off");
3232
3233 npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;
3234
3235 if (!mdata->has_pixel_ram) {
3236 nfids += mdss_mdp_parse_dt_prop_len(pdev,
3237 "qcom,mdss-pipe-vig-fetch-id");
3238 nfids += mdss_mdp_parse_dt_prop_len(pdev,
3239 "qcom,mdss-pipe-rgb-fetch-id");
3240 nfids += mdss_mdp_parse_dt_prop_len(pdev,
3241 "qcom,mdss-pipe-dma-fetch-id");
3242 if (npipes != nfids) {
3243 pr_err("device tree err: unequal number of pipes and smp ids");
3244 return -EINVAL;
3245 }
3246 }
3247
3248 if (mdata->nvig_pipes)
3249 nxids += mdss_mdp_parse_dt_prop_len(pdev,
3250 "qcom,mdss-pipe-vig-xin-id");
3251 if (mdata->nrgb_pipes)
3252 nxids += mdss_mdp_parse_dt_prop_len(pdev,
3253 "qcom,mdss-pipe-rgb-xin-id");
3254 if (mdata->ndma_pipes)
3255 nxids += mdss_mdp_parse_dt_prop_len(pdev,
3256 "qcom,mdss-pipe-dma-xin-id");
3257 if (npipes != nxids) {
3258 pr_err("device tree err: unequal number of pipes and xin ids\n");
3259 return -EINVAL;
3260 }
3261
3262 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
3263 &mdata->vig_pipes, mdata->nvig_pipes, 0);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303264 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303265 goto parse_fail;
3266 mdata->nvig_pipes = rc;
3267
3268 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
3269 &mdata->rgb_pipes, mdata->nrgb_pipes,
3270 mdata->nvig_pipes);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303271 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303272 goto parse_fail;
3273 mdata->nrgb_pipes = rc;
3274
3275 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
3276 &mdata->dma_pipes, mdata->ndma_pipes,
3277 mdata->nvig_pipes + mdata->nrgb_pipes);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303278 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303279 goto parse_fail;
3280 mdata->ndma_pipes = rc;
3281
3282 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
3283 "cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
3284 0);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303285 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303286 goto parse_fail;
3287 mdata->ncursor_pipes = rc;
3288
3289 rc = 0;
3290
3291 mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
3292 &sw_reset_offset, 1);
3293 if (sw_reset_offset) {
3294 if (mdata->vig_pipes)
3295 mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
3296 "qcom,mdss-pipe-vig-sw-reset-map",
3297 mdata->vig_pipes, mdata->nvig_pipes);
3298 if (mdata->rgb_pipes)
3299 mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
3300 "qcom,mdss-pipe-rgb-sw-reset-map",
3301 mdata->rgb_pipes, mdata->nrgb_pipes);
3302 if (mdata->dma_pipes)
3303 mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
3304 "qcom,mdss-pipe-dma-sw-reset-map",
3305 mdata->dma_pipes, mdata->ndma_pipes);
3306 }
3307
3308 mdata->has_panic_ctrl = of_property_read_bool(pdev->dev.of_node,
3309 "qcom,mdss-has-panic-ctrl");
3310 if (mdata->has_panic_ctrl) {
3311 if (mdata->vig_pipes)
3312 mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
3313 "qcom,mdss-pipe-vig-panic-ctrl-offsets",
3314 mdata->vig_pipes, mdata->nvig_pipes);
3315 if (mdata->rgb_pipes)
3316 mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
3317 "qcom,mdss-pipe-rgb-panic-ctrl-offsets",
3318 mdata->rgb_pipes, mdata->nrgb_pipes);
3319 if (mdata->dma_pipes)
3320 mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
3321 "qcom,mdss-pipe-dma-panic-ctrl-offsets",
3322 mdata->dma_pipes, mdata->ndma_pipes);
3323 }
3324
3325 len = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-per-pipe-panic-luts");
3326 if (len != 4) {
3327 pr_debug("Unable to read per-pipe-panic-luts\n");
3328 } else {
3329 rc = mdss_mdp_parse_dt_handler(pdev,
3330 "qcom,mdss-per-pipe-panic-luts", data, len);
3331 mdata->default_panic_lut_per_pipe_linear = data[0];
3332 mdata->default_panic_lut_per_pipe_tile = data[1];
3333 mdata->default_robust_lut_per_pipe_linear = data[2];
3334 mdata->default_robust_lut_per_pipe_tile = data[3];
3335 pr_debug("per pipe panic lut [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
3336 data[0], data[1], data[2], data[3]);
3337 }
3338
3339parse_fail:
3340 return rc;
3341}
3342
/*
 * mdss_mdp_parse_dt_mixer() - parse mixer/DSPP/pingpong DT configuration
 * @pdev: MDSS platform device
 *
 * Reads the interface and writeback mixer counts plus their register
 * offsets, validates them against the DSPP and pingpong tables, and
 * registers all mixers through mdss_mdp_mixer_addr_setup().  Targets
 * without dedicated writeback mixers (and no separate rotator) reuse
 * the last interface mixer offsets as "virtual" writeback mixers, one
 * per DMA pipe.
 *
 * Return: 0 on success, negative errno on DT inconsistency, parse or
 * allocation failure.
 */
static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
{

	u32 nmixers, npingpong;
	int rc = 0;
	u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
		*pingpong_offsets = NULL;
	u32 is_virtual_mixer_req = false;

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-intf-off");
	mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-wb-off");
	mdata->ndspp = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-dspp-off");
	npingpong = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pingpong-off");
	nmixers = mdata->nmixers_intf + mdata->nmixers_wb;

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-mixer-width", &mdata->max_mixer_width);
	if (rc) {
		pr_err("device tree err: failed to get max mixer width\n");
		return -EINVAL;
	}

	/* every DSPP hangs off an interface mixer; never more DSPPs */
	if (mdata->nmixers_intf < mdata->ndspp) {
		pr_err("device tree err: no of dspp are greater than intf mixers\n");
		return -EINVAL;
	}

	/* each interface mixer needs exactly one pingpong block */
	if (mdata->nmixers_intf != npingpong) {
		pr_err("device tree err: unequal no of pingpong and intf mixers\n");
		return -EINVAL;
	}

	mixer_offsets = kcalloc(nmixers, sizeof(u32), GFP_KERNEL);
	if (!mixer_offsets)
		return -ENOMEM;

	dspp_offsets = kcalloc(mdata->ndspp, sizeof(u32), GFP_KERNEL);
	if (!dspp_offsets) {
		rc = -ENOMEM;
		goto dspp_alloc_fail;
	}
	pingpong_offsets = kcalloc(npingpong, sizeof(u32), GFP_KERNEL);
	if (!pingpong_offsets) {
		rc = -ENOMEM;
		goto pingpong_alloc_fail;
	}

	/* interface mixer offsets occupy the front of mixer_offsets */
	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
		mixer_offsets, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	mdata->has_separate_rotator = of_property_read_bool(pdev->dev.of_node,
					"qcom,mdss-has-separate-rotator");
	if (mdata->nmixers_wb) {
		/* writeback mixer offsets follow the interface ones */
		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
				mixer_offsets + mdata->nmixers_intf,
				mdata->nmixers_wb);
		if (rc)
			goto parse_done;
	} else if (!mdata->has_separate_rotator) {
		/*
		 * If writeback mixers are not available, put the number of
		 * writeback mixers equal to number of DMA pipes so that
		 * later same number of virtual writeback mixers can be
		 * allocated.
		 */
		mdata->nmixers_wb = mdata->ndma_pipes;
		is_virtual_mixer_req = true;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
		dspp_offsets, mdata->ndspp);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
		pingpong_offsets, npingpong);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
			dspp_offsets, pingpong_offsets,
			MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	if (mdata->nmixers_wb) {
		if (is_virtual_mixer_req) {
			/*
			 * Replicate last interface mixers based on number of
			 * dma pipes available as virtual writeback mixers.
			 */
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf - mdata->ndma_pipes,
					NULL, NULL, MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		} else {
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf, NULL, NULL,
					MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		}
	}

parse_done:
	kfree(pingpong_offsets);
pingpong_alloc_fail:
	kfree(dspp_offsets);
dspp_alloc_fail:
	kfree(mixer_offsets);

	return rc;
}
3467
3468static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
3469 u32 *cdm_offsets, u32 len)
3470{
3471 struct mdss_mdp_cdm *head;
3472 u32 i = 0;
3473
3474 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_cdm) *
3475 len, GFP_KERNEL);
3476 if (!head)
3477 return -ENOMEM;
3478
3479 for (i = 0; i < len; i++) {
3480 head[i].num = i;
3481 head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
3482 atomic_set(&head[i].kref.refcount, 0);
3483 mutex_init(&head[i].lock);
3484 init_completion(&head[i].free_comp);
3485 pr_debug("%s: cdm off (%d) = %pK\n", __func__, i, head[i].base);
3486 }
3487
3488 mdata->cdm_off = head;
3489 mutex_init(&mdata->cdm_lock);
3490 return 0;
3491}
3492
3493static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev)
3494{
3495 int rc = 0;
3496 u32 *cdm_offsets = NULL;
3497 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3498
3499 mdata->ncdm = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-cdm-off");
3500
3501 if (!mdata->ncdm) {
3502 rc = 0;
3503 pr_debug("%s: No CDM offsets present in DT\n", __func__);
3504 goto end;
3505 }
3506 pr_debug("%s: cdm len == %d\n", __func__, mdata->ncdm);
3507 cdm_offsets = kcalloc(mdata->ncdm, sizeof(u32), GFP_KERNEL);
3508 if (!cdm_offsets) {
3509 rc = -ENOMEM;
3510 mdata->ncdm = 0;
3511 goto end;
3512 }
3513
3514 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-cdm-off", cdm_offsets,
3515 mdata->ncdm);
3516 if (rc) {
3517 pr_err("device tree err: failed to get cdm offsets\n");
3518 goto fail;
3519 }
3520
3521 rc = mdss_mdp_cdm_addr_setup(mdata, cdm_offsets, mdata->ncdm);
3522 if (rc) {
3523 pr_err("%s: CDM address setup failed\n", __func__);
3524 goto fail;
3525 }
3526
3527fail:
3528 kfree(cdm_offsets);
3529 if (rc)
3530 mdata->ncdm = 0;
3531end:
3532 return rc;
3533}
3534
3535static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
3536 u32 *dsc_offsets, u32 len)
3537{
3538 struct mdss_mdp_dsc *head;
3539 u32 i = 0;
3540
3541 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_dsc) *
3542 len, GFP_KERNEL);
3543 if (!head)
3544 return -ENOMEM;
3545
3546 for (i = 0; i < len; i++) {
3547 head[i].num = i;
3548 head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
3549 pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
3550 }
3551
3552 mdata->dsc_off = head;
3553 return 0;
3554}
3555
3556static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev)
3557{
3558 int rc = 0;
3559 u32 *dsc_offsets = NULL;
3560 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3561
3562 mdata->ndsc = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dsc-off");
3563 if (!mdata->ndsc) {
3564 rc = 0;
3565 pr_debug("No DSC offsets present in DT\n");
3566 goto end;
3567 }
3568 pr_debug("dsc len == %d\n", mdata->ndsc);
3569
3570 dsc_offsets = kcalloc(mdata->ndsc, sizeof(u32), GFP_KERNEL);
3571 if (!dsc_offsets) {
3572 rc = -ENOMEM;
3573 mdata->ndsc = 0;
3574 goto end;
3575 }
3576
3577 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dsc-off", dsc_offsets,
3578 mdata->ndsc);
3579 if (rc) {
3580 pr_err("device tree err: failed to get cdm offsets\n");
3581 goto fail;
3582 }
3583
3584 rc = mdss_mdp_dsc_addr_setup(mdata, dsc_offsets, mdata->ndsc);
3585 if (rc) {
3586 pr_err("%s: DSC address setup failed\n", __func__);
3587 goto fail;
3588 }
3589
3590fail:
3591 kfree(dsc_offsets);
3592 if (rc)
3593 mdata->ndsc = 0;
3594end:
3595 return rc;
3596}
3597
3598static int mdss_mdp_parse_dt_wb(struct platform_device *pdev)
3599{
3600 int rc = 0;
3601 u32 *wb_offsets = NULL;
3602 u32 num_wb_mixer, nwb_offsets, num_intf_wb = 0;
3603 const char *wfd_data;
3604 struct mdss_data_type *mdata;
3605
3606 mdata = platform_get_drvdata(pdev);
3607
3608 num_wb_mixer = mdata->nmixers_wb;
3609
3610 wfd_data = of_get_property(pdev->dev.of_node,
3611 "qcom,mdss-wfd-mode", NULL);
3612 if (wfd_data && strcmp(wfd_data, "shared") != 0)
3613 num_intf_wb = 1;
3614
3615 nwb_offsets = mdss_mdp_parse_dt_prop_len(pdev,
3616 "qcom,mdss-wb-off");
3617
3618 wb_offsets = kcalloc(nwb_offsets, sizeof(u32), GFP_KERNEL);
3619 if (!wb_offsets)
3620 return -ENOMEM;
3621
3622 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
3623 wb_offsets, nwb_offsets);
3624 if (rc)
3625 goto wb_parse_done;
3626
3627 rc = mdss_mdp_wb_addr_setup(mdata, num_wb_mixer, num_intf_wb);
3628 if (rc)
3629 goto wb_parse_done;
3630
3631 mdata->nwb_offsets = nwb_offsets;
3632 mdata->wb_offsets = wb_offsets;
3633
3634 return 0;
3635
3636wb_parse_done:
3637 kfree(wb_offsets);
3638 return rc;
3639}
3640
3641static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
3642{
3643 int rc = 0;
3644 u32 *ctl_offsets = NULL;
3645
3646 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3647
3648 mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
3649 "qcom,mdss-ctl-off");
3650
3651 if (mdata->nctl < mdata->nwb) {
3652 pr_err("device tree err: number of ctl greater than wb\n");
3653 rc = -EINVAL;
3654 goto parse_done;
3655 }
3656
3657 ctl_offsets = kcalloc(mdata->nctl, sizeof(u32), GFP_KERNEL);
3658 if (!ctl_offsets)
3659 return -ENOMEM;
3660
3661 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
3662 ctl_offsets, mdata->nctl);
3663 if (rc)
3664 goto parse_done;
3665
3666 rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, mdata->nctl);
3667 if (rc)
3668 goto parse_done;
3669
3670parse_done:
3671 kfree(ctl_offsets);
3672
3673 return rc;
3674}
3675
3676static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
3677{
3678 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3679 u32 count;
3680 u32 *offsets;
3681 int rc;
3682
3683
3684 count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
3685 if (count == 0)
3686 return -EINVAL;
3687
3688 offsets = kcalloc(count, sizeof(u32), GFP_KERNEL);
3689 if (!offsets)
3690 return -ENOMEM;
3691
3692 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
3693 offsets, count);
3694 if (rc)
3695 goto parse_fail;
3696
3697 rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
3698 if (rc)
3699 pr_err("unable to setup video interfaces\n");
3700
3701parse_fail:
3702 kfree(offsets);
3703
3704 return rc;
3705}
3706
3707static int mdss_mdp_update_smp_map(struct platform_device *pdev,
3708 const u32 *data, int len, int pipe_cnt,
3709 struct mdss_mdp_pipe *pipes)
3710{
3711 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3712 int i, j, k;
3713 u32 cnt, mmb;
3714
3715 len /= sizeof(u32);
3716 for (i = 0, k = 0; i < len; k++) {
3717 struct mdss_mdp_pipe *pipe = NULL;
3718
3719 if (k >= pipe_cnt) {
3720 pr_err("invalid fixed mmbs\n");
3721 return -EINVAL;
3722 }
3723
3724 pipe = &pipes[k];
3725
3726 cnt = be32_to_cpu(data[i++]);
3727 if (cnt == 0)
3728 continue;
3729
3730 for (j = 0; j < cnt; j++) {
3731 mmb = be32_to_cpu(data[i++]);
3732 if (mmb > mdata->smp_mb_cnt) {
3733 pr_err("overflow mmb:%d pipe:%d: max:%d\n",
3734 mmb, k, mdata->smp_mb_cnt);
3735 return -EINVAL;
3736 }
3737 set_bit(mmb, pipe->smp_map[0].fixed);
3738 }
3739 if (bitmap_intersects(pipe->smp_map[0].fixed,
3740 mdata->mmb_alloc_map,
3741 mdata->smp_mb_cnt)) {
3742 pr_err("overlapping fixed mmb map\n");
3743 return -EINVAL;
3744 }
3745 bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed,
3746 mdata->mmb_alloc_map, mdata->smp_mb_cnt);
3747 }
3748 return 0;
3749}
3750
/*
 * mdss_mdp_parse_dt_smp() - parse shared memory pool (SMP) configuration
 * @pdev: MDSS platform device
 *
 * qcom,mdss-smp-data holds two values: the number of SMP blocks and
 * the size of each block.  Also reads the optional per-pipe block
 * budget and the fixed mmb reservations for RGB and VIG pipes.
 *
 * NOTE(review): a failure from the RGB fixed-mmb update is only warned
 * about and rc may be overwritten by the later VIG update; the value
 * returned is whichever update ran last — confirm this best-effort
 * propagation is intended.
 *
 * Return: 0 on success (or when the property is absent), negative
 * errno otherwise.
 */
static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 num;
	u32 data[2];
	int rc, len;
	const u32 *arr;

	num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
	/*
	 * This property is optional for targets with fix pixel ram. Rest
	 * must provide no. of smp and size of each block.
	 */
	if (!num)
		return 0;
	else if (num != 2)
		return -EINVAL;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
	if (rc)
		return rc;

	rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);

	if (rc) {
		pr_err("unable to setup smp data\n");
		return rc;
	}

	/* optional cap on blocks per pipe; 0 means no explicit cap */
	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-smp-mb-per-pipe", data);
	mdata->smp_mb_per_pipe = (!rc ? data[0] : 0);

	rc = 0;
	arr = of_get_property(pdev->dev.of_node,
			"qcom,mdss-pipe-rgb-fixed-mmb", &len);
	if (arr) {
		rc = mdss_mdp_update_smp_map(pdev, arr, len,
				mdata->nrgb_pipes, mdata->rgb_pipes);

		if (rc)
			pr_warn("unable to update smp map for RGB pipes\n");
	}

	arr = of_get_property(pdev->dev.of_node,
			"qcom,mdss-pipe-vig-fixed-mmb", &len);
	if (arr) {
		rc = mdss_mdp_update_smp_map(pdev, arr, len,
				mdata->nvig_pipes, mdata->vig_pipes);

		if (rc)
			pr_warn("unable to update smp map for VIG pipes\n");
	}
	return rc;
}
3806
3807static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev,
3808 char *prop_name, struct mult_factor *ff)
3809{
3810 int rc;
3811 u32 data[2] = {1, 1};
3812
3813 rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2);
3814 if (rc) {
3815 pr_debug("err reading %s\n", prop_name);
3816 } else {
3817 ff->numer = data[0];
3818 ff->denom = data[1];
3819 }
3820}
3821
3822static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev)
3823{
3824 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3825 struct mdss_prefill_data *prefill = &mdata->prefill_data;
3826 int rc;
3827
3828 rc = of_property_read_u32(pdev->dev.of_node,
3829 "qcom,mdss-prefill-outstanding-buffer-bytes",
3830 &prefill->ot_bytes);
3831 if (rc) {
3832 pr_err("prefill outstanding buffer bytes not specified\n");
3833 return rc;
3834 }
3835
3836 rc = of_property_read_u32(pdev->dev.of_node,
3837 "qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes);
3838 if (rc) {
3839 pr_err("prefill y buffer bytes not specified\n");
3840 return rc;
3841 }
3842
3843 rc = of_property_read_u32(pdev->dev.of_node,
3844 "qcom,mdss-prefill-scaler-buffer-lines-bilinear",
3845 &prefill->y_scaler_lines_bilinear);
3846 if (rc) {
3847 pr_err("prefill scaler lines for bilinear not specified\n");
3848 return rc;
3849 }
3850
3851 rc = of_property_read_u32(pdev->dev.of_node,
3852 "qcom,mdss-prefill-scaler-buffer-lines-caf",
3853 &prefill->y_scaler_lines_caf);
3854 if (rc) {
3855 pr_debug("prefill scaler lines for caf not specified\n");
3856 return rc;
3857 }
3858
3859 rc = of_property_read_u32(pdev->dev.of_node,
3860 "qcom,mdss-prefill-post-scaler-buffer-pixels",
3861 &prefill->post_scaler_pixels);
3862 if (rc) {
3863 pr_err("prefill post scaler buffer pixels not specified\n");
3864 return rc;
3865 }
3866
3867 rc = of_property_read_u32(pdev->dev.of_node,
3868 "qcom,mdss-prefill-pingpong-buffer-pixels",
3869 &prefill->pp_pixels);
3870 if (rc) {
3871 pr_err("prefill pingpong buffer lines not specified\n");
3872 return rc;
3873 }
3874
3875 rc = of_property_read_u32(pdev->dev.of_node,
3876 "qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines);
3877 if (rc)
3878 pr_debug("prefill FBC lines not specified\n");
3879
3880 return 0;
3881}
3882
/*
 * mdss_mdp_parse_vbif_qos() - parse VBIF QoS remapper tables from DT
 * @pdev: MDSS platform device
 *
 * Reads the real-time and non-real-time QoS remap tables; each is only
 * accepted when it has exactly MDSS_VBIF_QOS_REMAP_ENTRIES entries.
 * Failures are non-fatal (void return, debug-level logs).
 *
 * NOTE(review): mdata->npriority_lvl is reused for both tables and ends
 * up reflecting whichever table was processed last; if the NRT table is
 * invalid it is zeroed even though the RT table parsed fine — confirm
 * consumers expect both tables to share one level count.
 */
static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	int rc;

	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-vbif-qos-rt-setting");
	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
		mdata->vbif_rt_qos = kcalloc(mdata->npriority_lvl,
				sizeof(u32), GFP_KERNEL);
		if (!mdata->vbif_rt_qos)
			return;

		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-vbif-qos-rt-setting",
				mdata->vbif_rt_qos, mdata->npriority_lvl);
		if (rc) {
			pr_debug("rt setting not found\n");
			return;
		}
	} else {
		/* wrong entry count: treat as absent */
		mdata->npriority_lvl = 0;
		pr_debug("Invalid or no vbif qos rt setting\n");
		return;
	}

	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-vbif-qos-nrt-setting");
	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
		mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
				sizeof(u32), GFP_KERNEL);
		if (!mdata->vbif_nrt_qos)
			return;

		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-vbif-qos-nrt-setting", mdata->vbif_nrt_qos,
				mdata->npriority_lvl);
		if (rc) {
			pr_debug("nrt setting not found\n");
			return;
		}
	} else {
		mdata->npriority_lvl = 0;
		pr_debug("Invalid or no vbif qos nrt seting\n");
	}
}
3929
3930static void mdss_mdp_parse_max_bw_array(const u32 *arr,
3931 struct mdss_max_bw_settings *max_bw_settings, int count)
3932{
3933 int i;
3934
3935 for (i = 0; i < count; i++) {
3936 max_bw_settings->mdss_max_bw_mode = be32_to_cpu(arr[i*2]);
3937 max_bw_settings->mdss_max_bw_val = be32_to_cpu(arr[(i*2)+1]);
3938 max_bw_settings++;
3939 }
3940}
3941
3942static void mdss_mdp_parse_max_bandwidth(struct platform_device *pdev)
3943{
3944 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3945 struct mdss_max_bw_settings *max_bw_settings;
3946 int max_bw_settings_cnt = 0;
3947 const u32 *max_bw;
3948
3949 max_bw = of_get_property(pdev->dev.of_node, "qcom,max-bw-settings",
3950 &max_bw_settings_cnt);
3951
3952 if (!max_bw || !max_bw_settings_cnt) {
3953 pr_debug("MDSS max bandwidth settings not found\n");
3954 return;
3955 }
3956
3957 max_bw_settings_cnt /= 2 * sizeof(u32);
3958
3959 max_bw_settings = devm_kzalloc(&pdev->dev, sizeof(*max_bw_settings)
3960 * max_bw_settings_cnt, GFP_KERNEL);
3961 if (!max_bw_settings)
3962 return;
3963
3964 mdss_mdp_parse_max_bw_array(max_bw, max_bw_settings,
3965 max_bw_settings_cnt);
3966
3967 mdata->max_bw_settings = max_bw_settings;
3968 mdata->max_bw_settings_cnt = max_bw_settings_cnt;
3969}
3970
3971static void mdss_mdp_parse_per_pipe_bandwidth(struct platform_device *pdev)
3972{
3973
3974 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3975 struct mdss_max_bw_settings *max_bw_per_pipe_settings;
3976 int max_bw_settings_cnt = 0;
3977 const u32 *max_bw_settings;
3978 u32 max_bw, min_bw, threshold, i = 0;
3979
3980 max_bw_settings = of_get_property(pdev->dev.of_node,
3981 "qcom,max-bandwidth-per-pipe-kbps",
3982 &max_bw_settings_cnt);
3983
3984 if (!max_bw_settings || !max_bw_settings_cnt) {
3985 pr_debug("MDSS per pipe max bandwidth settings not found\n");
3986 return;
3987 }
3988
3989 /* Support targets where a common per pipe max bw is provided */
3990 if ((max_bw_settings_cnt / sizeof(u32)) == 1) {
3991 mdata->max_bw_per_pipe = be32_to_cpu(max_bw_settings[0]);
3992 mdata->max_per_pipe_bw_settings = NULL;
3993 pr_debug("Common per pipe max bandwidth provided\n");
3994 return;
3995 }
3996
3997 max_bw_settings_cnt /= 2 * sizeof(u32);
3998
3999 max_bw_per_pipe_settings = devm_kzalloc(&pdev->dev,
4000 sizeof(struct mdss_max_bw_settings) * max_bw_settings_cnt,
4001 GFP_KERNEL);
4002 if (!max_bw_per_pipe_settings) {
4003 pr_err("Memory allocation failed for max_bw_settings\n");
4004 return;
4005 }
4006
4007 mdss_mdp_parse_max_bw_array(max_bw_settings, max_bw_per_pipe_settings,
4008 max_bw_settings_cnt);
4009 mdata->max_per_pipe_bw_settings = max_bw_per_pipe_settings;
4010 mdata->mdss_per_pipe_bw_cnt = max_bw_settings_cnt;
4011
4012 /* Calculate min and max allowed per pipe BW */
4013 min_bw = mdata->max_bw_high;
4014 max_bw = 0;
4015
4016 while (i < max_bw_settings_cnt) {
4017 threshold = mdata->max_per_pipe_bw_settings[i].mdss_max_bw_val;
4018 if (threshold > max_bw)
4019 max_bw = threshold;
4020 if (threshold < min_bw)
4021 min_bw = threshold;
4022 ++i;
4023 }
4024 mdata->max_bw_per_pipe = max_bw;
4025 mdata->min_bw_per_pipe = min_bw;
4026}
4027
/*
 * mdss_mdp_parse_dt_misc() - parse miscellaneous MDSS DT properties
 * @pdev: MDSS platform device
 *
 * Reads feature flags (BWC, decimation, source split, idle power
 * collapse, ...), wfd mode, bus fudge factors, bandwidth limits, clock
 * levels, VBIF QoS tables and rotator downscale bounds.  Almost every
 * property is optional with a documented fallback; only the pingpong
 * split offsets are mandatory once pingpong split is advertised.
 *
 * Return: 0 on success, negative errno on a missing mandatory pingpong
 * split property or clock level allocation failure.
 */
static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 data, slave_pingpong_off;
	const char *wfd_data;
	int rc;
	struct property *prop = NULL;

	/* rotator block size defaults to 128 when unspecified */
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
		&data);
	mdata->rot_block_size = (!rc ? data : 128);

	/* outstanding-transaction limits default to 0 (no explicit limit) */
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-rd-limit", &data);
	mdata->default_ot_rd_limit = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-wr-limit", &data);
	mdata->default_ot_wr_limit = (!rc ? data : 0);

	mdata->has_non_scalar_rgb = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-non-scalar-rgb");
	mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-bwc");
	mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-decimation");
	mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-lut-read");
	/* hist vote is required unless the DT explicitly opts out */
	mdata->needs_hist_vote = !(of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-hist-vote"));
	wfd_data = of_get_property(pdev->dev.of_node,
					"qcom,mdss-wfd-mode", NULL);
	if (wfd_data) {
		pr_debug("wfd mode: %s\n", wfd_data);
		if (!strcmp(wfd_data, "intf")) {
			mdata->wfd_mode = MDSS_MDP_WFD_INTERFACE;
		} else if (!strcmp(wfd_data, "shared")) {
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		} else if (!strcmp(wfd_data, "dedicated")) {
			mdata->wfd_mode = MDSS_MDP_WFD_DEDICATED;
		} else {
			/* unrecognized string falls back to shared */
			pr_debug("wfd default mode: Shared\n");
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		}
	} else {
		pr_warn("wfd mode not configured. Set to default: Shared\n");
		mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
	}

	mdata->has_src_split = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-source-split");
	mdata->has_fixed_qos_arbiter_enabled =
			of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-fixed-qos-arbiter-enabled");
	mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-idle-power-collapse-enabled");

	/* batfet regulator is only managed when the supply is declared */
	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
	mdata->batfet_required = prop ? true : false;
	mdata->en_svs_high = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-en-svs-high");
	if (!mdata->en_svs_high)
		pr_debug("%s: svs_high is not enabled\n", __func__);
	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
	if (rc)
		pr_debug("Could not read optional property: highest bank bit\n");

	mdata->has_pingpong_split = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-pingpong-split");

	/* pingpong split makes the slave pingpong + ppb offsets mandatory */
	if (mdata->has_pingpong_split) {
		rc = of_property_read_u32(pdev->dev.of_node,
				"qcom,mdss-slave-pingpong-off",
				&slave_pingpong_off);
		if (rc) {
			pr_err("Error in device tree: slave pingpong offset\n");
			return rc;
		}
		mdata->slave_pingpong_base = mdata->mdss_io.base +
				slave_pingpong_off;
		rc = mdss_mdp_parse_dt_ppb_off(pdev);
		if (rc) {
			pr_err("Error in device tree: ppb offset not configured\n");
			return rc;
		}
	}

	/*
	 * 2x factor on AB because bus driver will divide by 2
	 * due to 2x ports to BIMC
	 */
	mdata->ab_factor.numer = 2;
	mdata->ab_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor",
		&mdata->ab_factor);

	/*
	 * 1.2 factor on ib as default value. This value is
	 * experimentally determined and should be tuned in device
	 * tree.
	 */
	mdata->ib_factor.numer = 6;
	mdata->ib_factor.denom = 5;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor",
		&mdata->ib_factor);

	/*
	 * Set overlap ib value equal to ib by default. This value can
	 * be tuned in device tree to be different from ib.
	 * This factor apply when the max bandwidth per pipe
	 * is the overlap BW.
	 */
	mdata->ib_factor_overlap.numer = mdata->ib_factor.numer;
	mdata->ib_factor_overlap.denom = mdata->ib_factor.denom;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap",
		&mdata->ib_factor_overlap);

	mdata->clk_factor.numer = 1;
	mdata->clk_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
		&mdata->clk_factor);

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
	if (rc)
		pr_debug("max bandwidth (low) property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-high-kbps", &mdata->max_bw_high);
	if (rc)
		pr_debug("max bandwidth (high) property not specified\n");

	mdss_mdp_parse_per_pipe_bandwidth(pdev);

	mdss_mdp_parse_max_bandwidth(pdev);

	mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
					"qcom,mdss-clk-levels");

	if (mdata->nclk_lvl) {
		mdata->clock_levels = kcalloc(mdata->nclk_lvl, sizeof(u32),
							GFP_KERNEL);
		if (!mdata->clock_levels)
			return -ENOMEM;

		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
			mdata->clock_levels, mdata->nclk_lvl);
		if (rc)
			pr_debug("clock levels not found\n");
	}

	mdss_mdp_parse_vbif_qos(pdev);
	mdata->traffic_shaper_en = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-traffic-shaper-enabled");
	mdata->has_rot_dwnscale = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-rotator-downscale");
	if (mdata->has_rot_dwnscale) {
		rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-rot-downscale-min",
			&mdata->rot_dwnscale_min);
		if (rc)
			pr_err("Min rotator downscale property not specified\n");

		rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-rot-downscale-max",
			&mdata->rot_dwnscale_max);
		if (rc)
			pr_err("Max rotator downscale property not specified\n");
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-dram-channels", &mdata->bus_channels);
	if (rc)
		pr_debug("number of channels property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,max-pipe-width", &mdata->max_pipe_width);
	if (rc) {
		pr_debug("max pipe width not specified. Using default value\n");
		mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
	}
	/* optional-property failures above are absorbed; always succeed */
	return 0;
}
4212
4213static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev)
4214{
4215 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4216 u32 *ad_offsets = NULL;
4217 int rc;
4218
4219 mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off");
4220
4221 if (mdata->nad_cfgs == 0) {
4222 mdata->ad_cfgs = NULL;
4223 return 0;
4224 }
4225
4226 if (mdata->nad_cfgs > mdata->nmixers_intf)
4227 return -EINVAL;
4228
4229
4230 mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node,
4231 "qcom,mdss-has-wb-ad");
4232
4233 ad_offsets = kcalloc(mdata->nad_cfgs, sizeof(u32), GFP_KERNEL);
4234 if (!ad_offsets)
4235 return -ENOMEM;
4236
4237 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets,
4238 mdata->nad_cfgs);
4239 if (rc)
4240 goto parse_done;
4241
4242 rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets);
4243 if (rc)
4244 pr_err("unable to setup assertive display\n");
4245
4246parse_done:
4247 kfree(ad_offsets);
4248 return rc;
4249}
4250
4251static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev)
4252{
4253 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4254 u32 len, index;
4255 const u32 *arr;
4256
4257 arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-ctl-off", &len);
4258 if (arr) {
4259 mdata->nppb_ctl = len / sizeof(u32);
4260 mdata->ppb_ctl = devm_kzalloc(&mdata->pdev->dev,
4261 sizeof(u32) * mdata->nppb_ctl, GFP_KERNEL);
4262
4263 if (mdata->ppb_ctl == NULL)
4264 return -ENOMEM;
4265
4266 for (index = 0; index < mdata->nppb_ctl; index++)
4267 mdata->ppb_ctl[index] = be32_to_cpu(arr[index]);
4268 }
4269
4270 arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-cfg-off", &len);
4271 if (arr) {
4272 mdata->nppb_cfg = len / sizeof(u32);
4273 mdata->ppb_cfg = devm_kzalloc(&mdata->pdev->dev,
4274 sizeof(u32) * mdata->nppb_cfg, GFP_KERNEL);
4275
4276 if (mdata->ppb_cfg == NULL)
4277 return -ENOMEM;
4278
4279 for (index = 0; index < mdata->nppb_cfg; index++)
4280 mdata->ppb_cfg[index] = be32_to_cpu(arr[index]);
4281 }
4282 return 0;
4283}
4284
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05304285#ifdef CONFIG_QCOM_BUS_SCALING
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304286static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
4287{
4288 int rc, paths;
4289 struct device_node *node;
4290 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4291
4292 rc = of_property_read_u32(pdev->dev.of_node,
4293 "qcom,msm-bus,num-paths", &paths);
4294 if (rc) {
4295 pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n",
4296 rc);
4297 return rc;
4298 }
4299 mdss_res->axi_port_cnt = paths;
4300
4301 rc = of_property_read_u32(pdev->dev.of_node,
4302 "qcom,mdss-num-nrt-paths", &mdata->nrt_axi_port_cnt);
4303 if (rc && mdata->has_fixed_qos_arbiter_enabled) {
4304 pr_err("Error. qcom,mdss-num-nrt-paths prop not found.rc=%d\n",
4305 rc);
4306 return rc;
4307 }
4308 rc = 0;
4309
4310 mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
4311 if (IS_ERR_OR_NULL(mdata->bus_scale_table)) {
4312 rc = PTR_ERR(mdata->bus_scale_table);
4313 if (!rc)
4314 rc = -EINVAL;
4315 pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
4316 mdata->bus_scale_table = NULL;
4317 return rc;
4318 }
4319
4320 /*
4321 * if mdss-reg-bus is not found then default table is picked
4322 * hence below code wont return error.
4323 */
4324 node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus");
4325 if (node) {
4326 mdata->reg_bus_scale_table =
4327 msm_bus_pdata_from_node(pdev, node);
4328 if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) {
4329 rc = PTR_ERR(mdata->reg_bus_scale_table);
4330 if (!rc)
4331 pr_err("bus_pdata reg_bus failed rc=%d\n", rc);
4332 rc = 0;
4333 mdata->reg_bus_scale_table = NULL;
4334 }
4335 } else {
4336 rc = 0;
4337 mdata->reg_bus_scale_table = NULL;
4338 pr_debug("mdss-reg-bus not found\n");
4339 }
4340
4341 node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-hw-rt-bus");
4342 if (node) {
4343 mdata->hw_rt_bus_scale_table =
4344 msm_bus_pdata_from_node(pdev, node);
4345 if (IS_ERR_OR_NULL(mdata->hw_rt_bus_scale_table)) {
4346 rc = PTR_ERR(mdata->hw_rt_bus_scale_table);
4347 if (!rc)
4348 pr_err("hw_rt_bus_scale failed rc=%d\n", rc);
4349 rc = 0;
4350 mdata->hw_rt_bus_scale_table = NULL;
4351 }
4352 } else {
4353 rc = 0;
4354 mdata->hw_rt_bus_scale_table = NULL;
4355 pr_debug("mdss-hw-rt-bus not found\n");
4356 }
4357
4358 return rc;
4359}
4360#else
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05304361__maybe_unused
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304362static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
4363{
4364 return 0;
4365}
4366
4367#endif
4368
4369static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
4370 char *prop_name, u32 *offsets, int len)
4371{
4372 int rc;
4373
4374 rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
4375 offsets, len);
4376 if (rc) {
4377 pr_err("Error from prop %s : u32 array read\n", prop_name);
4378 return -EINVAL;
4379 }
4380
4381 return 0;
4382}
4383
4384static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
4385 char *prop_name)
4386{
4387 int len = 0;
4388
4389 of_find_property(pdev->dev.of_node, prop_name, &len);
4390
4391 if (len < 1) {
4392 pr_debug("prop %s : doesn't exist in device tree\n",
4393 prop_name);
4394 return 0;
4395 }
4396
4397 len = len/sizeof(u32);
4398
4399 return len;
4400}
4401
/* Return the global MDP driver data (NULL until the driver has probed). */
struct mdss_data_type *mdss_mdp_get_mdata(void)
{
	return mdss_res;
}
4406
4407void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable)
4408{
4409 int ret;
4410
4411 if (!mdata->batfet_required)
4412 return;
4413
4414 if (!mdata->batfet) {
4415 if (enable) {
4416 mdata->batfet = devm_regulator_get(&mdata->pdev->dev,
4417 "batfet");
4418 if (IS_ERR_OR_NULL(mdata->batfet)) {
4419 pr_debug("unable to get batfet reg. rc=%d\n",
4420 PTR_RET(mdata->batfet));
4421 mdata->batfet = NULL;
4422 return;
4423 }
4424 } else {
4425 pr_debug("Batfet regulator disable w/o enable\n");
4426 return;
4427 }
4428 }
4429
4430 if (enable) {
4431 ret = regulator_enable(mdata->batfet);
4432 if (ret)
4433 pr_err("regulator_enable failed\n");
4434 } else {
4435 regulator_disable(mdata->batfet);
4436 }
4437}
4438
4439/**
4440 * mdss_is_ready() - checks if mdss is probed and ready
4441 *
4442 * Checks if mdss resources have been initialized
4443 *
4444 * returns true if mdss is ready, else returns false
4445 */
4446bool mdss_is_ready(void)
4447{
4448 return mdss_mdp_get_mdata() ? true : false;
4449}
4450EXPORT_SYMBOL(mdss_mdp_get_mdata);
4451
4452/**
4453 * mdss_panel_intf_type() - checks if a given intf type is primary
4454 * @intf_val: panel interface type of the individual controller
4455 *
4456 * Individual controller queries with MDP to check if it is
4457 * configured as the primary interface.
4458 *
4459 * returns a pointer to the configured structure mdss_panel_cfg
4460 * to the controller that's configured as the primary panel interface.
4461 * returns NULL on error or if @intf_val is not the configured
4462 * controller.
4463 */
4464struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
4465{
4466 if (!mdss_res || !mdss_res->pan_cfg.init_done)
4467 return ERR_PTR(-EPROBE_DEFER);
4468
4469 if (mdss_res->pan_cfg.pan_intf == intf_val)
4470 return &mdss_res->pan_cfg;
4471 else
4472 return NULL;
4473}
4474EXPORT_SYMBOL(mdss_panel_intf_type);
4475
4476struct irq_info *mdss_intr_line()
4477{
4478 return mdss_mdp_hw.irq_info;
4479}
4480EXPORT_SYMBOL(mdss_intr_line);
4481
4482int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt)
4483{
4484 void __iomem *vbif_base;
4485 u32 status;
4486 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4487 u32 idle_mask = BIT(xin_id);
4488 int rc;
4489
4490 vbif_base = is_vbif_nrt ? mdata->vbif_nrt_io.base :
4491 mdata->vbif_io.base;
4492
4493 rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
4494 status, (status & idle_mask),
4495 1000, XIN_HALT_TIMEOUT_US);
4496 if (rc == -ETIMEDOUT) {
4497 pr_err("VBIF client %d not halting. TIMEDOUT.\n",
4498 xin_id);
4499 MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
4500 "dbg_bus", "vbif_dbg_bus", "panic");
4501 } else {
4502 pr_debug("VBIF client %d is halted\n", xin_id);
4503 }
4504
4505 return rc;
4506}
4507
4508/**
4509 * force_on_xin_clk() - enable/disable the force-on for the pipe clock
4510 * @bit_off: offset of the bit to enable/disable the force-on.
4511 * @reg_off: register offset for the clock control.
4512 * @enable: boolean to indicate if the force-on of the clock needs to be
4513 * enabled or disabled.
4514 *
4515 * This function returns:
4516 * true - if the clock is forced-on by this function
4517 * false - if the clock was already forced on
4518 * It is the caller responsibility to check if this function is forcing
4519 * the clock on; if so, it will need to remove the force of the clock,
4520 * otherwise it should avoid to remove the force-on.
4521 * Clocks must be on when calling this function.
4522 */
4523bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
4524{
4525 u32 val;
4526 u32 force_on_mask;
4527 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4528 bool clk_forced_on = false;
4529
4530 force_on_mask = BIT(bit_off);
4531 val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
4532
4533 clk_forced_on = !(force_on_mask & val);
4534
4535 if (true == enable)
4536 val |= force_on_mask;
4537 else
4538 val &= ~force_on_mask;
4539
4540 writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
4541
4542 return clk_forced_on;
4543}
4544
/*
 * apply_dynamic_ot_limit() - tighten the OT (outstanding transaction)
 * limit for rotator and writeback use cases based on resolution, fps
 * and MDP hardware revision.
 * @ot_lim: in/out OT limit; only overwritten when a dynamic rule applies
 * @params:描述less — geometry and use-case flags of the client
 *
 * No-op unless MDSS_QOS_OTLIM is enabled for this target and the client
 * is a YUV rotator or a writeback.
 */
static void apply_dynamic_ot_limit(u32 *ot_lim,
	struct mdss_mdp_set_ot_params *params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 res, read_vbif_ot;
	u32 rot_ot = 4;

	if (false == test_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map))
		return;

	/* Dynamic OT setting done only for rotator and WFD */
	if (!((params->is_rot && params->is_yuv) || params->is_wb))
		return;

	res = params->width * params->height;

	pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n",
		params->width, params->height, params->is_rot,
		params->is_yuv, params->is_wb, res, params->frame_rate);

	switch (mdata->mdp_rev) {
	case MDSS_MDP_HW_REV_114:
		/*
		 * MDP rev is same for msm8937 and msm8940, but rotator OT
		 * recommendations are different. Setting it based on AXI OT.
		 */
		read_vbif_ot = MDSS_VBIF_READ(mdata, MMSS_VBIF_OUT_RD_LIM_CONF0,
			false);
		rot_ot = (read_vbif_ot == 0x10) ? 4 : 8;
		/* fall-through: REV_114 shares the limits below, with its
		 * own rot_ot computed above */
	case MDSS_MDP_HW_REV_115:
	case MDSS_MDP_HW_REV_116:
		if ((res <= RES_1080p) && (params->frame_rate <= 30))
			*ot_lim = 2;
		else if (params->is_rot && params->is_yuv)
			*ot_lim = rot_ot;
		else
			*ot_lim = 6;
		break;
	default:
		/* note: res > RES_UHD intentionally leaves *ot_lim unchanged */
		if (res <= RES_1080p) {
			*ot_lim = 2;
		} else if (res <= RES_UHD) {
			if (params->is_rot && params->is_yuv)
				*ot_lim = 8;
			else
				*ot_lim = 16;
		}
		break;
	}
}
4596
4597static u32 get_ot_limit(u32 reg_off, u32 bit_off,
4598 struct mdss_mdp_set_ot_params *params)
4599{
4600 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4601 u32 ot_lim = 0;
4602 u32 is_vbif_nrt, val;
4603
4604 if (mdata->default_ot_wr_limit &&
4605 (params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF))
4606 ot_lim = mdata->default_ot_wr_limit;
4607 else if (mdata->default_ot_rd_limit &&
4608 (params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF))
4609 ot_lim = mdata->default_ot_rd_limit;
4610
4611 /*
4612 * If default ot is not set from dt,
4613 * then do not configure it.
4614 */
4615 if (ot_lim == 0)
4616 goto exit;
4617
4618 /* Modify the limits if the target and the use case requires it */
4619 apply_dynamic_ot_limit(&ot_lim, params);
4620
4621 is_vbif_nrt = params->is_vbif_nrt;
4622 val = MDSS_VBIF_READ(mdata, reg_off, is_vbif_nrt);
4623 val &= (0xFF << bit_off);
4624 val = val >> bit_off;
4625
4626 if (val == ot_lim)
4627 ot_lim = 0;
4628
4629exit:
4630 pr_debug("ot_lim=%d\n", ot_lim);
4631 return ot_lim;
4632}
4633
/*
 * mdss_mdp_set_ot_limit() - program the OT limit for one VBIF client
 * @params: client identity (xin_id), register offsets and use-case flags
 *
 * Sequence: compute the desired limit (early-out when nothing changes),
 * force the client clock on if needed, write the 8-bit OT field, halt
 * the client, wait for the halt to take effect so the new limit applies,
 * then unhalt and drop the clock force-on. reg_lock guards the VBIF
 * read-modify-write cycles; it is intentionally released around the
 * polling wait so other clients are not blocked for the timeout.
 */
void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 ot_lim;
	/* each conf register packs four clients: 4 bytes apart, 8 bits each */
	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
		params->reg_off_vbif_lim_conf;
	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
	bool is_vbif_nrt = params->is_vbif_nrt;
	u32 reg_val;
	bool forced_on;

	ot_lim = get_ot_limit(
		reg_off_vbif_lim_conf,
		bit_off_vbif_lim_conf,
		params) & 0xFF;

	/* zero means "already correct" or "not configured" — nothing to do */
	if (ot_lim == 0)
		goto exit;

	trace_mdp_perf_set_ot(params->num, params->xin_id, ot_lim,
		is_vbif_nrt);

	mutex_lock(&mdata->reg_lock);

	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
		params->reg_off_mdp_clk_ctrl, true);

	/* replace this client's 8-bit OT field, preserving its neighbours */
	reg_val = MDSS_VBIF_READ(mdata, reg_off_vbif_lim_conf,
		is_vbif_nrt);
	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
	MDSS_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val,
		is_vbif_nrt);

	/* halt the client so the new limit takes effect */
	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		is_vbif_nrt);
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val | BIT(params->xin_id), is_vbif_nrt);

	/* drop the lock while polling so other register users can proceed */
	mutex_unlock(&mdata->reg_lock);
	mdss_mdp_wait_for_xin_halt(params->xin_id, is_vbif_nrt);
	mutex_lock(&mdata->reg_lock);

	/* release the halt */
	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		is_vbif_nrt);
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val & ~BIT(params->xin_id), is_vbif_nrt);

	/* only undo the clock force-on if this function applied it */
	if (forced_on)
		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
			params->reg_off_mdp_clk_ctrl, false);

	mutex_unlock(&mdata->reg_lock);

exit:
	return;
}
4691
/* RPM resource type "misc" and KVP key "svs+" (little-endian ASCII) */
#define RPM_MISC_REQ_TYPE 0x6373696d
#define RPM_MISC_REQ_SVS_PLUS_KEY 0x2B737673
4694
/*
 * mdss_mdp_config_cx_voltage() - vote/unvote the RPM "SVS+" misc request
 * @mdata:  MDP private data
 * @enable: non-zero to vote for SVS-high, zero to remove the vote
 *
 * No-op unless the target enabled en_svs_high. The vote is sent for both
 * the active and the sleep RPM sets. rpm_kvp and svs_en are static so the
 * key/value buffer referenced by msm_rpm_send_message() outlives the call
 * and is initialized only once.
 */
static void mdss_mdp_config_cx_voltage(struct mdss_data_type *mdata, int enable)
{
	int ret = 0;
	static struct msm_rpm_kvp rpm_kvp;
	static uint8_t svs_en;

	if (!mdata->en_svs_high)
		return;

	/* one-time setup of the key/value pair on first use */
	if (!rpm_kvp.key) {
		rpm_kvp.key = RPM_MISC_REQ_SVS_PLUS_KEY;
		rpm_kvp.length = sizeof(uint64_t);
		pr_debug("%s: Initialized rpm_kvp structure\n", __func__);
	}

	if (enable) {
		svs_en = 1;
		rpm_kvp.data = &svs_en;
		pr_debug("%s: voting for svs high\n", __func__);
		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("vote for active_set svs high failed: %d\n",
					ret);
		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("vote for sleep_set svs high failed: %d\n",
					ret);
	} else {
		svs_en = 0;
		rpm_kvp.data = &svs_en;
		pr_debug("%s: Removing vote for svs high\n", __func__);
		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("Remove vote:active_set svs high failed: %d\n",
					ret);
		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("Remove vote:sleep_set svs high failed: %d\n",
					ret);
	}
}
4744
4745static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable)
4746{
4747 int rc = 0;
4748
4749 if (!mdata->vdd_cx)
4750 return rc;
4751
4752 if (enable) {
4753 rc = regulator_set_voltage(
4754 mdata->vdd_cx,
4755 RPM_REGULATOR_CORNER_SVS_SOC,
4756 RPM_REGULATOR_CORNER_SUPER_TURBO);
4757 if (rc < 0)
4758 goto vreg_set_voltage_fail;
4759
4760 pr_debug("Enabling CX power rail\n");
4761 rc = regulator_enable(mdata->vdd_cx);
4762 if (rc) {
4763 pr_err("Failed to enable regulator.\n");
4764 return rc;
4765 }
4766 } else {
4767 pr_debug("Disabling CX power rail\n");
4768 rc = regulator_disable(mdata->vdd_cx);
4769 if (rc) {
4770 pr_err("Failed to disable regulator.\n");
4771 return rc;
4772 }
4773 rc = regulator_set_voltage(
4774 mdata->vdd_cx,
4775 RPM_REGULATOR_CORNER_NONE,
4776 RPM_REGULATOR_CORNER_SUPER_TURBO);
4777 if (rc < 0)
4778 goto vreg_set_voltage_fail;
4779 }
4780
4781 return rc;
4782
4783vreg_set_voltage_fail:
4784 pr_err("Set vltg fail\n");
4785 return rc;
4786}
4787
4788/**
4789 * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
4790 * @mdata: MDP private data
4791 * @on: 1 to turn on footswitch, 0 to turn off footswitch
4792 *
4793 * When no active references to the MDP device node and it's child nodes are
4794 * held, MDSS GDSC can be turned off. However, any any panels are still
4795 * active (but likely in an idle state), the vote for the CX and the batfet
4796 * rails should not be released.
4797 */
4798static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
4799{
4800 int ret;
4801 int active_cnt = 0;
4802
4803 if (!mdata->fs)
4804 return;
4805
4806 MDSS_XLOG(on, mdata->fs_ena, mdata->idle_pc, mdata->en_svs_high,
4807 atomic_read(&mdata->active_intf_cnt));
4808
4809 if (on) {
4810 if (!mdata->fs_ena) {
4811 pr_debug("Enable MDP FS\n");
4812 if (mdata->venus) {
4813 ret = regulator_enable(mdata->venus);
4814 if (ret)
4815 pr_err("venus failed to enable\n");
4816 }
4817
4818 ret = regulator_enable(mdata->fs);
4819 if (ret)
4820 pr_warn("Footswitch failed to enable\n");
4821 if (!mdata->idle_pc) {
4822 mdss_mdp_cx_ctrl(mdata, true);
4823 mdss_mdp_batfet_ctrl(mdata, true);
4824 }
4825 }
4826 if (mdata->en_svs_high)
4827 mdss_mdp_config_cx_voltage(mdata, true);
4828 mdata->fs_ena = true;
4829 } else {
4830 if (mdata->fs_ena) {
4831 pr_debug("Disable MDP FS\n");
4832 active_cnt = atomic_read(&mdata->active_intf_cnt);
4833 if (active_cnt != 0) {
4834 /*
4835 * Turning off GDSC while overlays are still
4836 * active.
4837 */
4838 mdata->idle_pc = true;
4839 pr_debug("idle pc. active overlays=%d\n",
4840 active_cnt);
4841 mdss_mdp_memory_retention_enter();
4842 } else {
4843 mdss_mdp_cx_ctrl(mdata, false);
4844 mdss_mdp_batfet_ctrl(mdata, false);
4845 }
4846 if (mdata->en_svs_high)
4847 mdss_mdp_config_cx_voltage(mdata, false);
4848 regulator_disable(mdata->fs);
4849 if (mdata->venus)
4850 regulator_disable(mdata->venus);
4851 }
4852 mdata->fs_ena = false;
4853 }
4854}
4855
/*
 * mdss_mdp_secure_display_ctrl() - toggle secure display via TrustZone
 * @mdata:  MDP private data
 * @enable: 1 to enter secure display, 0 to exit
 *
 * Reference-counted: the SCM call is issued only for the first enable
 * (client count 0) or the last disable (client count 1); other callers
 * just adjust the client count. Uses the legacy scm_call() interface on
 * pre-ARMv8 TZ, scm_call2() otherwise.
 *
 * Returns 0/TZ response on success, or the SCM transport error.
 */
int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
	unsigned int enable)
{
	/* packed request layout expected by the legacy scm_call() path */
	struct sd_ctrl_req {
		unsigned int enable;
	} __attribute__ ((__packed__)) request;
	unsigned int resp = -1;
	int ret = 0;
	struct scm_desc desc;

	/* not the first enable / not the last disable: bump count only */
	if ((enable && (mdss_get_sd_client_cnt() > 0)) ||
		(!enable && (mdss_get_sd_client_cnt() > 1))) {
		mdss_update_sd_client(mdata, enable);
		return ret;
	}

	desc.args[0] = request.enable = enable;
	desc.arginfo = SCM_ARGS(1);

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
			&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
			mem_protect_sd_ctrl_id), &desc);
		resp = desc.ret[0];
	}

	pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
		enable, ret, resp);
	if (ret)
		return ret;

	mdss_update_sd_client(mdata, enable);
	return resp;
}
4892
4893static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
4894{
4895 mdata->suspend_fs_ena = mdata->fs_ena;
4896 mdss_mdp_footswitch_ctrl(mdata, false);
4897
4898 pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
4899
4900 return 0;
4901}
4902
4903static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
4904{
4905 if (mdata->suspend_fs_ena)
4906 mdss_mdp_footswitch_ctrl(mdata, true);
4907
4908 pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
4909
4910 return 0;
4911}
4912
4913#ifdef CONFIG_PM_SLEEP
4914static int mdss_mdp_pm_suspend(struct device *dev)
4915{
4916 struct mdss_data_type *mdata;
4917
4918 mdata = dev_get_drvdata(dev);
4919 if (!mdata)
4920 return -ENODEV;
4921
4922 dev_dbg(dev, "display pm suspend\n");
4923
4924 return mdss_mdp_suspend_sub(mdata);
4925}
4926
4927static int mdss_mdp_pm_resume(struct device *dev)
4928{
4929 struct mdss_data_type *mdata;
4930
4931 mdata = dev_get_drvdata(dev);
4932 if (!mdata)
4933 return -ENODEV;
4934
4935 dev_dbg(dev, "display pm resume\n");
4936
4937 /*
4938 * It is possible that the runtime status of the mdp device may
4939 * have been active when the system was suspended. Reset the runtime
4940 * status to suspended state after a complete system resume.
4941 */
4942 pm_runtime_disable(dev);
4943 pm_runtime_set_suspended(dev);
4944 pm_runtime_enable(dev);
4945
4946 return mdss_mdp_resume_sub(mdata);
4947}
4948#endif
4949
4950#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
4951static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
4952{
4953 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4954
4955 if (!mdata)
4956 return -ENODEV;
4957
4958 dev_dbg(&pdev->dev, "display suspend\n");
4959
4960 return mdss_mdp_suspend_sub(mdata);
4961}
4962
4963static int mdss_mdp_resume(struct platform_device *pdev)
4964{
4965 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4966
4967 if (!mdata)
4968 return -ENODEV;
4969
4970 dev_dbg(&pdev->dev, "display resume\n");
4971
4972 return mdss_mdp_resume_sub(mdata);
4973}
4974#else
4975#define mdss_mdp_suspend NULL
4976#define mdss_mdp_resume NULL
4977#endif
4978
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05304979#ifdef CONFIG_PM
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304980static int mdss_mdp_runtime_resume(struct device *dev)
4981{
4982 struct mdss_data_type *mdata = dev_get_drvdata(dev);
4983 bool device_on = true;
4984
4985 if (!mdata)
4986 return -ENODEV;
4987
4988 dev_dbg(dev, "pm_runtime: resuming. active overlay cnt=%d\n",
4989 atomic_read(&mdata->active_intf_cnt));
4990
4991 /* do not resume panels when coming out of idle power collapse */
4992 if (!mdata->idle_pc)
4993 device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
4994 mdss_mdp_footswitch_ctrl(mdata, true);
4995
4996 return 0;
4997}
4998
4999static int mdss_mdp_runtime_idle(struct device *dev)
5000{
5001 struct mdss_data_type *mdata = dev_get_drvdata(dev);
5002
5003 if (!mdata)
5004 return -ENODEV;
5005
5006 dev_dbg(dev, "pm_runtime: idling...\n");
5007
5008 return 0;
5009}
5010
5011static int mdss_mdp_runtime_suspend(struct device *dev)
5012{
5013 struct mdss_data_type *mdata = dev_get_drvdata(dev);
5014 bool device_on = false;
5015
5016 if (!mdata)
5017 return -ENODEV;
5018 dev_dbg(dev, "pm_runtime: suspending. active overlay cnt=%d\n",
5019 atomic_read(&mdata->active_intf_cnt));
5020
5021 if (mdata->clk_ena) {
5022 pr_err("MDP suspend failed\n");
5023 return -EBUSY;
5024 }
5025
5026 mdss_mdp_footswitch_ctrl(mdata, false);
5027 /* do not suspend panels when going in to idle power collapse */
5028 if (!mdata->idle_pc)
5029 device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
5030
5031 return 0;
5032}
5033#endif
5034
/* System-sleep hooks are always wired; runtime-PM hooks only with CONFIG_PM. */
static const struct dev_pm_ops mdss_mdp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
#ifdef CONFIG_PM
	SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
			mdss_mdp_runtime_resume,
			mdss_mdp_runtime_idle)
#endif
};
5043
5044static int mdss_mdp_remove(struct platform_device *pdev)
5045{
5046 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
5047
5048 if (!mdata)
5049 return -ENODEV;
5050 pm_runtime_disable(&pdev->dev);
5051 mdss_mdp_pp_term(&pdev->dev);
5052 mdss_mdp_bus_scale_unregister(mdata);
5053 mdss_debugfs_remove(mdata);
5054 if (mdata->regulator_notif_register)
5055 regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
5056 return 0;
5057}
5058
/* Device-tree match table: binds this driver to "qcom,mdss_mdp" nodes. */
static const struct of_device_id mdss_mdp_dt_match[] = {
	{ .compatible = "qcom,mdss_mdp",},
	{}
};
MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
5064
/*
 * Platform driver definition. The legacy .suspend/.resume hooks are
 * compiled to NULL except in the CONFIG_PM && !CONFIG_PM_SLEEP case
 * (see the #ifdef block above); dev_pm_ops is the primary PM path.
 */
static struct platform_driver mdss_mdp_driver = {
	.probe = mdss_mdp_probe,
	.remove = mdss_mdp_remove,
	.suspend = mdss_mdp_suspend,
	.resume = mdss_mdp_resume,
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.of_match_table = mdss_mdp_dt_match,
		.pm = &mdss_mdp_pm_ops,
	},
};
5081
/* Register the MDP platform driver; returns platform core's result. */
static int mdss_mdp_register_driver(void)
{
	return platform_driver_register(&mdss_mdp_driver);
}
5086
5087static int __init mdss_mdp_driver_init(void)
5088{
5089 int ret;
5090
5091 ret = mdss_mdp_register_driver();
5092 if (ret) {
5093 pr_err("mdp_register_driver() failed!\n");
5094 return ret;
5095 }
5096
5097 return 0;
5098
5099}
5100
5101module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0600);
5102/*
5103 * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>
5104 * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
5105 * config; <pan_intf> is dsi:<ctrl_id> or hdmi or edp
5106 * <pan_intf_cfg> is panel interface specific string
5107 * Ex: This string is panel's device node name from DT
5108 * for DSI interface
5109 * hdmi/edp interface does not use this string
5110 * <panel_topology_cfg> is an optional string. Currently it is
5111 * only valid for DSI panels. In dual-DSI case, it needs to be
5112 * used on both panels or none. When used, format is config%d
5113 * where %d is one of the configuration found in device node of
5114 * panel selected by <pan_intf_cfg>
5115 */
5116MODULE_PARM_DESC(panel, "lk supplied panel selection string");
5117MODULE_PARM_DESC(panel,
5118 "panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>");
5119module_init(mdss_mdp_driver_init);