blob: c2fffef6fa8d920bb8a07364396a13429e3ce338 [file] [log] [blame]
Dhaval Patel14d46ce2017-01-17 16:28:12 -08001/*
Raviteja Tamatam30fa3e32018-01-04 21:00:22 +05302 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
Dhaval Patel14d46ce2017-01-17 16:28:12 -08003 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07005 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -08006 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07009 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -080010 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070017 */
18
Alan Kwong5d324e42016-07-28 22:56:18 -040019#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
20
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070021#include <drm/drm_crtc.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040022#include <linux/debugfs.h>
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070023#include <linux/of_address.h>
Dhaval Patel04c7e8e2016-09-26 20:14:31 -070024#include <linux/of_irq.h>
Alan Kwong4dd64c82017-02-04 18:41:51 -080025#include <linux/dma-buf.h>
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070026#include <linux/memblock.h>
27#include <linux/bootmem.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040028
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070029#include "msm_drv.h"
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -040030#include "msm_mmu.h"
Clarence Ipd02440b2017-05-21 18:10:01 -040031#include "msm_gem.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040032
33#include "dsi_display.h"
34#include "dsi_drm.h"
35#include "sde_wb.h"
Padmanabhan Komanduru63758612017-05-23 01:47:18 -070036#include "dp_display.h"
37#include "dp_drm.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040038
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070039#include "sde_kms.h"
Alan Kwongf5dd86c2016-08-09 18:08:17 -040040#include "sde_core_irq.h"
Clarence Ip4ce59322016-06-26 22:27:51 -040041#include "sde_formats.h"
Alan Kwong5d324e42016-07-28 22:56:18 -040042#include "sde_hw_vbif.h"
Alan Kwong83285fb2016-10-21 20:51:17 -040043#include "sde_vbif.h"
44#include "sde_encoder.h"
45#include "sde_plane.h"
46#include "sde_crtc.h"
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -080047#include "sde_reg_dma.h"
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070048
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -080049#include <soc/qcom/scm.h>
50#include "soc/qcom/secure_buffer.h"
51
Alan Kwong1a00e4d2016-07-18 09:42:30 -040052#define CREATE_TRACE_POINTS
53#include "sde_trace.h"
54
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -080055/* defines for secure channel call */
56#define SEC_SID_CNT 2
57#define SEC_SID_MASK_0 0x80881
58#define SEC_SID_MASK_1 0x80C81
59#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
60#define MDP_DEVICE_ID 0x1A
61
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -040062static const char * const iommu_ports[] = {
63 "mdp_0",
64};
65
Clarence Ip4ce59322016-06-26 22:27:51 -040066/**
67 * Controls size of event log buffer. Specified as a power of 2.
68 */
69#define SDE_EVTLOG_SIZE 1024
70
71/*
72 * To enable overall DRM driver logging
73 * # echo 0x2 > /sys/module/drm/parameters/debug
74 *
75 * To enable DRM driver h/w logging
Dhaval Patel6c666622017-03-21 23:02:59 -070076 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
Clarence Ip4ce59322016-06-26 22:27:51 -040077 *
78 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
79 */
80#define SDE_DEBUGFS_DIR "msm_sde"
81#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
82
Clarence Ipdd395242016-09-09 10:47:17 -040083/**
84 * sdecustom - enable certain driver customizations for sde clients
85 * Enabling this modifies the standard DRM behavior slightly and assumes
86 * that the clients have specific knowledge about the modifications that
87 * are involved, so don't enable this unless you know what you're doing.
88 *
89 * Parts of the driver that are affected by this setting may be located by
90 * searching for invocations of the 'sde_is_custom_client()' function.
91 *
 * This is enabled by default (see the 'sdecustom' initializer below).
93 */
Clarence Ipb1b3c802016-10-03 16:49:38 -040094static bool sdecustom = true;
Clarence Ipdd395242016-09-09 10:47:17 -040095module_param(sdecustom, bool, 0400);
96MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
97
Clarence Ip17162b52016-11-24 17:06:29 -050098static int sde_kms_hw_init(struct msm_kms *kms);
99static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -0700100static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -0700101static int _sde_kms_register_events(struct msm_kms *kms,
102 struct drm_mode_object *obj, u32 event, bool en);
/* sde_is_custom_client - report whether sde client customizations are
 * enabled (value of the 'sdecustom' module parameter).
 */
bool sde_is_custom_client(void)
{
	return sdecustom;
}
107
Veera Sundaram Sankaran1fb97e72018-04-10 15:53:12 -0700108bool sde_kms_is_vbif_operation_allowed(struct sde_kms *sde_kms)
109{
110 struct drm_device *dev;
111 struct drm_crtc *crtc;
112 bool sui_enhancement = false;
113
114 if (!sde_kms || !sde_kms->dev)
115 return false;
116 dev = sde_kms->dev;
117
118 if (!sde_kms->catalog->sui_misr_supported)
119 return true;
120
121 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
122 if (!crtc->state || !crtc->state->active)
123 continue;
124
125 sui_enhancement |= sde_crtc_is_sui_enhancement_enabled(crtc);
126 }
127
128 if (!sui_enhancement)
129 return true;
130
131 return !sde_kms_is_secure_session_inprogress(sde_kms);
132}
133
Alan Kwongf0fd8512016-10-24 21:39:26 -0400134#ifdef CONFIG_DEBUG_FS
/**
 * _sde_danger_signal_status - dump danger/safe signal status to a seq_file
 * @s: seq_file supplied by debugfs; s->private carries the sde_kms pointer
 * @danger_status: true to dump danger status, false to dump safe status
 *
 * Votes the SDE power resource on around the register access, queries the
 * MDP top block, and prints the MDP, per-SSPP and per-WB status words.
 * Returns 0 (even for invalid args, so the debugfs read itself succeeds)
 * or the power-enable error code.
 */
static int _sde_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct sde_kms *kms = (struct sde_kms *)s->private;
	struct msm_drm_private *priv;
	struct sde_danger_safe_status status;
	int i;
	int rc;

	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
		SDE_ERROR("invalid arg(s)\n");
		return 0;
	}

	priv = kms->dev->dev_private;
	memset(&status, 0, sizeof(struct sde_danger_safe_status));

	/* clocks must be on before touching MDP status registers */
	rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
	if (rc) {
		SDE_ERROR("failed to enable power resource %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		return rc;
	}

	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		/*
		 * NOTE(review): both branches invoke get_danger_status();
		 * if the ops table provides a separate get_safe_status(),
		 * this branch should probably use it - confirm against the
		 * hw_mdp ops definition.
		 */
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	}
	sde_power_resource_enable(&priv->phandle, kms->core_client, false);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	for (i = WB_0; i < WB_MAX; i++)
		seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
				status.wb[i]);
	seq_puts(s, "\n");

	return 0;
}
186
/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate a single_open()-based
 * file_operations for an existing seq_file show callback named
 * <prefix>_show; emits <prefix>_open and <prefix>_fops for use with
 * debugfs_create_file().
 */
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
199
/* seq_file show callback for the "danger_status" debugfs node */
static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, true);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
205
/* seq_file show callback for the "safe_status" debugfs node */
static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, false);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
211
212static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
213{
214 debugfs_remove_recursive(sde_kms->debugfs_danger);
215 sde_kms->debugfs_danger = NULL;
216}
217
218static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
219 struct dentry *parent)
220{
221 sde_kms->debugfs_danger = debugfs_create_dir("danger",
222 parent);
223 if (!sde_kms->debugfs_danger) {
224 SDE_ERROR("failed to create danger debugfs\n");
225 return -EINVAL;
226 }
227
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400228 debugfs_create_file("danger_status", 0600, sde_kms->debugfs_danger,
Alan Kwongf0fd8512016-10-24 21:39:26 -0400229 sde_kms, &sde_debugfs_danger_stats_fops);
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400230 debugfs_create_file("safe_status", 0600, sde_kms->debugfs_danger,
Alan Kwongf0fd8512016-10-24 21:39:26 -0400231 sde_kms, &sde_debugfs_safe_stats_fops);
232
233 return 0;
234}
235
/**
 * _sde_debugfs_show_regset32 - seq_file show callback that dumps a range
 *	of 32-bit SDE registers
 * @s: seq_file; s->private carries the sde_debugfs_regset32 descriptor
 * @data: unused
 *
 * Prints registers in rows of four words, each row prefixed with its
 * 16-byte-aligned address. Clocks are voted on around the reads.
 * Always returns 0 so the debugfs read succeeds.
 */
static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct sde_debugfs_regset32 *regset;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	sde_kms = regset->sde_kms;
	if (!sde_kms || !sde_kms->mmio)
		return 0;

	dev = sde_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = sde_kms->mmio + regset->offset;

	/* insert padding spaces, if needed, so columns line up when the
	 * start offset is not 16-byte aligned
	 */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, " ");
	}

	/* clocks must be on before reading the mmio range */
	if (sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, true)) {
		seq_puts(s, "failed to enable sde clocks\n");
		return 0;
	}

	/* main register output: new row header at every 16-byte boundary */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	return 0;
}
289
/* debugfs open: bind _sde_debugfs_show_regset32 to this node's regset */
static int sde_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
}
295
/* file_operations for the register-dump debugfs nodes */
static const struct file_operations sde_fops_regset32 = {
	.open = sde_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
302
303void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
Clarence Ipaac9f332016-08-31 15:46:35 -0400304 uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
Clarence Ip4ce59322016-06-26 22:27:51 -0400305{
306 if (regset) {
307 regset->offset = offset;
308 regset->blk_len = length;
Clarence Ipaac9f332016-08-31 15:46:35 -0400309 regset->sde_kms = sde_kms;
Clarence Ip4ce59322016-06-26 22:27:51 -0400310 }
311}
312
313void *sde_debugfs_create_regset32(const char *name, umode_t mode,
314 void *parent, struct sde_debugfs_regset32 *regset)
315{
Clarence Ipaac9f332016-08-31 15:46:35 -0400316 if (!name || !regset || !regset->sde_kms || !regset->blk_len)
Clarence Ip4ce59322016-06-26 22:27:51 -0400317 return NULL;
318
319 /* make sure offset is a multiple of 4 */
320 regset->offset = round_down(regset->offset, 4);
321
322 return debugfs_create_file(name, mode, parent,
323 regset, &sde_fops_regset32);
324}
325
326void *sde_debugfs_get_root(struct sde_kms *sde_kms)
327{
Dhaval Patel6c666622017-03-21 23:02:59 -0700328 struct msm_drm_private *priv;
329
330 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
331 return NULL;
332
333 priv = sde_kms->dev->dev_private;
334 return priv->debug_root;
Clarence Ip4ce59322016-06-26 22:27:51 -0400335}
336
/*
 * _sde_debugfs_init - create the sde debugfs nodes under the drm debug
 * root: hw log mask, danger/safe status, vbif, core irq and perf entries.
 * Returns 0 on success or a negative error code.
 */
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* hw log mask node (debugfs_root already validated above; a stale
	 * comment previously claimed NULL was allowed here)
	 */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	/* best-effort sub-module debugfs; failures are ignored */
	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	return 0;
}
367
/* Tear down all sde debugfs entries created by _sde_debugfs_init(). */
static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
	/* don't need to NULL check debugfs_root */
	if (!sde_kms)
		return;

	sde_debugfs_vbif_destroy(sde_kms);
	sde_debugfs_danger_destroy(sde_kms);
	sde_debugfs_core_irq_destroy(sde_kms);
}
Alan Kwongf0fd8512016-10-24 21:39:26 -0400377#else
/* CONFIG_DEBUG_FS disabled: debugfs setup/teardown become no-ops */
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
}
386#endif
Clarence Ip4ce59322016-06-26 22:27:51 -0400387
/* msm_kms hook: enable vblank event delivery for @crtc */
static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return sde_crtc_vblank(crtc, true);
}
392
/* msm_kms hook: disable vblank event delivery for @crtc */
static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	sde_crtc_vblank(crtc, false);
}
397
/**
 * sde_kms_wait_for_frame_transfer_complete - wait until the current frame
 *	has fully left the hardware on every encoder attached to @crtc
 * @kms: pointer to msm_kms (validated only)
 * @crtc: crtc whose encoders are waited on
 *
 * No-op for crtcs that are not enabled or not active. -EWOULDBLOCK from
 * the encoder wait is tolerated (nothing pending); any other error aborts
 * the remaining encoders.
 */
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
				crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}
440
/**
 * _sde_kms_secure_ctrl_xin_clients - halt or release the xin clients that
 *	must be blocked during secure UI
 * @sde_kms: pointer to sde_kms
 * @crtc: crtc hosting the secure session
 * @enable: true to halt the clients (entering secure UI), false to release
 *
 * The client mask comes from the catalog's sui_block_xin_mask. On enable,
 * each plane additionally records its secure xin-client state for @crtc.
 * Returns 0 on success or the halt error code.
 */
static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *sde_cfg;
	struct drm_plane *plane;
	int i, ret;

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_cfg = sde_kms->catalog;

	ret = sde_vbif_halt_xin_mask(sde_kms,
			sde_cfg->sui_block_xin_mask, enable);
	if (ret) {
		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
		return ret;
	}

	if (enable) {
		for (i = 0; i < priv->num_planes; i++) {
			plane = priv->planes[i];
			sde_plane_secure_ctrl_xin_client(plane, crtc);
		}
	}

	return 0;
}
470
/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @vmid: switch the stage 2 translation to this VMID
 *
 * Builds the SID list, flushes it to memory so the secure world can read
 * it, and issues the MEM_PROTECT_SD_CTRL_SWITCH SMC. Returns 0 on success,
 * -ENOMEM on allocation failure, or the scm_call2() error code.
 */
static int _sde_kms_scm_call(int vmid)
{
	struct scm_desc desc = {0};
	uint32_t num_sids;
	uint32_t *sec_sid;
	uint32_t mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_SWITCH;
	int ret = 0;

	/* This info should be queried from catalog */
	num_sids = SEC_SID_CNT;
	sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
	if (!sec_sid)
		return -ENOMEM;

	/*
	 * derive this info from device tree/catalog, this is combination of
	 * smr mask and SID for secure
	 */
	sec_sid[0] = SEC_SID_MASK_0;
	sec_sid[1] = SEC_SID_MASK_1;
	/* make the SID buffer visible to the secure world before the SMC */
	dmac_flush_range(sec_sid, sec_sid + num_sids);

	SDE_DEBUG("calling scm_call for vmid %d", vmid);

	desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
	desc.args[0] = MDP_DEVICE_ID;
	desc.args[1] = SCM_BUFFER_PHYS(sec_sid);
	desc.args[2] = sizeof(uint32_t) * num_sids;
	desc.args[3] = vmid;

	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
			mem_protect_sd_ctrl_id), &desc);
	if (ret)
		SDE_ERROR("Error:scm_call2, vmid (%lld): ret%d\n",
				desc.args[3], ret);
	SDE_EVT32(mem_protect_sd_ctrl_id,
			desc.args[0], desc.args[3], num_sids,
			sec_sid[0], sec_sid[1], ret);

	kfree(sec_sid);
	return ret;
}
517
518static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms)
519{
520 u32 ret = 0;
521
522 if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
523 goto end;
524
525 /* detach_all_contexts */
526 ret = sde_kms_mmu_detach(sde_kms, false);
527 if (ret) {
528 SDE_ERROR("failed to detach all cb ret:%d\n", ret);
529 goto end;
530 }
531
532 ret = _sde_kms_scm_call(VMID_CP_SEC_DISPLAY);
533 if (ret)
534 goto end;
535
536end:
537 return ret;
538}
539
540static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms)
541{
542 u32 ret = 0;
543
544 if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
545 goto end;
546
547 ret = _sde_kms_scm_call(VMID_CP_PIXEL);
548 if (ret)
549 goto end;
550
551 /* attach_all_contexts */
552 ret = sde_kms_mmu_attach(sde_kms, false);
553 if (ret) {
554 SDE_ERROR("failed to attach all cb ret:%d\n", ret);
555 goto end;
556 }
557
558end:
559 return ret;
560}
561
562static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms)
563{
564 u32 ret = 0;
565
566 if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
567 goto end;
568
569 /* detach secure_context */
570 ret = sde_kms_mmu_detach(sde_kms, true);
571 if (ret) {
572 SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
573 goto end;
574 }
575
576 ret = _sde_kms_scm_call(VMID_CP_CAMERA_PREVIEW);
577 if (ret)
578 goto end;
579
580end:
581 return ret;
582}
583
584static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms)
585{
586 u32 ret = 0;
587
588 if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
589 goto end;
590
591 ret = _sde_kms_scm_call(VMID_CP_PIXEL);
592 if (ret)
593 goto end;
594
595 ret = sde_kms_mmu_attach(sde_kms, true);
596 if (ret) {
597 SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
598 goto end;
599 }
600
601end:
602 return ret;
603}
604
/**
 * _sde_kms_sui_misr_ctrl - enable/disable the secure-UI MISR and the
 *	associated xin-client blocking
 * @sde_kms: pointer to sde_kms
 * @crtc: crtc entering or leaving the secure-UI session
 * @enable: true when entering secure UI, false when leaving
 *
 * Enable path: vote power on, program the MISR, then halt the blocked xin
 * clients (the power vote is dropped again if the halt fails). Disable
 * path performs the same steps in reverse order. Returns 0 on success.
 */
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (enable) {
		ret = sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, true);
		if (ret) {
			SDE_ERROR("failed to enable resource, ret:%d\n", ret);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			/* undo the power vote taken above */
			sde_power_resource_enable(&priv->phandle,
					sde_kms->core_client, false);
			return ret;
		}

	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, false);
	}

	return 0;
}
638
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800639static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
640 bool post_commit)
641{
642 struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
643 int old_smmu_state = smmu_state->state;
644 int ret = 0;
645
646 if (!sde_kms || !crtc) {
647 SDE_ERROR("invalid argument(s)\n");
648 return -EINVAL;
649 }
650
651 SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
Veera Sundaram Sankaran61e4fba2018-02-27 17:59:22 -0800652 post_commit, smmu_state->sui_misr_state,
653 SDE_EVTLOG_FUNC_ENTRY);
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800654
655 if ((!smmu_state->transition_type) ||
656 ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
657 /* Bail out */
658 return 0;
659
Veera Sundaram Sankaran61e4fba2018-02-27 17:59:22 -0800660 /* enable sui misr if requested, before the transition */
661 if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
662 ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
663 if (ret)
664 goto end;
665 }
666
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800667 mutex_lock(&sde_kms->secure_transition_lock);
668 switch (smmu_state->state) {
669 /* Secure UI use case enable */
670 case DETACH_ALL_REQ:
671 ret = _sde_kms_detach_all_cb(sde_kms);
672 if (!ret)
673 smmu_state->state = DETACHED;
674 break;
675
676 /* Secure UI use case disable */
677 case ATTACH_ALL_REQ:
678 ret = _sde_kms_attach_all_cb(sde_kms);
679 if (!ret)
680 smmu_state->state = ATTACHED;
681 break;
682
683 /* Secure preview enable */
684 case DETACH_SEC_REQ:
685 ret = _sde_kms_detach_sec_cb(sde_kms);
686 if (!ret)
687 smmu_state->state = DETACHED_SEC;
688 break;
689
690 /* Secure preview disable */
691 case ATTACH_SEC_REQ:
692 ret = _sde_kms_attach_sec_cb(sde_kms);
693 if (!ret)
694 smmu_state->state = ATTACHED;
695 break;
696
697 default:
698 SDE_ERROR("crtc:%d invalid smmu state:%d transition type:%d\n",
699 DRMID(crtc), smmu_state->state,
700 smmu_state->transition_type);
701 ret = -EINVAL;
702 break;
703 }
704 mutex_unlock(&sde_kms->secure_transition_lock);
705
Veera Sundaram Sankaran61e4fba2018-02-27 17:59:22 -0800706 /* disable sui misr if requested, after the transition */
707 if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
708 ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
709 if (ret)
710 goto end;
711 }
712
713end:
714 smmu_state->sui_misr_state = NONE;
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800715 smmu_state->transition_type = NONE;
716 smmu_state->transition_error = ret ? true : false;
717
718 SDE_DEBUG("crtc:%d, old_state %d new_state %d, ret %d\n",
719 DRMID(crtc), old_smmu_state, smmu_state->state, ret);
720 SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
721 smmu_state->transition_error, ret,
722 SDE_EVTLOG_FUNC_EXIT);
723
724 return ret;
725}
726
/**
 * sde_kms_prepare_secure_transition - perform the operations required
 *	before a secure<->non-secure transition can start
 * @kms: pointer to msm_kms
 * @state: atomic state being committed
 *
 * For each active crtc in @state: queries the required operation mask,
 * then (in order) waits for frame transfer, cleans up plane fbs, runs the
 * pre-commit smmu secure-state change, and re-prepares plane fbs.
 * Returns 0 on success or a negative error code.
 */
static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;

	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	int i, ops = 0, ret = 0;
	bool old_valid_fb = false;

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state || !crtc->state->active)
			continue;
		/*
		 * It is safe to assume only one active crtc,
		 * and compatible translation modes on the
		 * planes staged on this crtc.
		 * otherwise validation would have failed.
		 * For this CRTC,
		 */

		/*
		 * 1. Check if old state on the CRTC has planes
		 * staged with valid fbs
		 *
		 * NOTE(review): this inner loop reuses 'i' and clobbers the
		 * outer crtc iterator - harmless only if exactly one active
		 * crtc is guaranteed; confirm.
		 */
		for_each_plane_in_state(state, plane, plane_state, i) {
			if (!plane_state->crtc)
				continue;
			if (plane_state->fb) {
				old_valid_fb = true;
				break;
			}
		}

		/*
		 * 2.Get the operations needed to be performed before
		 * secure transition can be initiated.
		 */
		ops = sde_crtc_get_secure_transition_ops(crtc,
				old_crtc_state, old_valid_fb);
		if (ops < 0) {
			SDE_ERROR("invalid secure operations %x\n", ops);
			return ops;
		}

		if (!ops)
			goto no_ops;

		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
				crtc->base.id, ops, crtc->state);
		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);

		/* 3. Perform operations needed for secure transition */
		if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
			SDE_DEBUG("wait_for_transfer_done\n");
			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
		}
		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
			SDE_DEBUG("cleanup planes\n");
			drm_atomic_helper_cleanup_planes(dev, state);
		}
		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
			SDE_DEBUG("secure ctrl\n");
			_sde_kms_secure_ctrl(sde_kms, crtc, false);
		}
		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
			SDE_DEBUG("prepare planes %d",
					crtc->state->plane_mask);
			drm_atomic_crtc_for_each_plane(plane,
					crtc) {
				const struct drm_plane_helper_funcs *funcs;

				plane_state = plane->state;
				funcs = plane->helper_private;

				SDE_DEBUG("psde:%d FB[%u]\n",
						plane->base.id,
						plane->fb->base.id);
				if (!funcs)
					continue;

				/*
				 * NOTE(review): prepare_fb() is invoked a
				 * second time whenever the first call fails;
				 * looks unintentional - confirm whether a
				 * single call with an error check was meant.
				 */
				if (funcs->prepare_fb(plane, plane_state)) {
					ret = funcs->prepare_fb(plane,
							plane_state);
					if (ret)
						return ret;
				}
			}
		}
		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
		SDE_DEBUG("secure operations completed\n");
	}

no_ops:
	return 0;
}
826
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -0700827static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
828 unsigned int size)
829{
830 unsigned long pfn_start, pfn_end, pfn_idx;
831 int ret = 0;
832
833 if (!mem_addr || !size)
834 SDE_ERROR("invalid params\n");
835
836 pfn_start = mem_addr >> PAGE_SHIFT;
837 pfn_end = (mem_addr + size) >> PAGE_SHIFT;
838
839 ret = memblock_free(mem_addr, size);
840 if (ret) {
841 SDE_ERROR("continuous splash memory free failed:%d\n", ret);
842 return ret;
843 }
844 for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
845 free_reserved_page(pfn_to_page(pfn_idx));
846
847 return ret;
848
849}
850
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -0800851static int _sde_kms_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
852 struct sde_splash_data *data)
853{
854 int ret = 0;
855
856 if (!mmu || !data)
857 return -EINVAL;
858
859 ret = mmu->funcs->one_to_one_map(mmu, data->splash_base,
860 data->splash_base, data->splash_size,
861 IOMMU_READ | IOMMU_NOEXEC);
862 if (ret)
863 SDE_ERROR("Splash smmu map failed: %d\n", ret);
864
865 return ret;
866}
867
868static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms)
869{
870 struct sde_splash_data *data;
871 struct msm_mmu *mmu;
872 int rc = 0;
873
874 if (!sde_kms)
875 return -EINVAL;
876
877 data = &sde_kms->splash_data;
878 if (!data) {
879 SDE_ERROR("Invalid splash data\n");
880 return -EINVAL;
881 }
882
883 if (!sde_kms->aspace[0]) {
884 SDE_ERROR("aspace not found for sde kms node\n");
885 return -EINVAL;
886 }
887
888 mmu = sde_kms->aspace[0]->mmu;
889 if (!mmu) {
890 SDE_ERROR("mmu not found for aspace\n");
891 return -EINVAL;
892 }
893
894 if (mmu->funcs && mmu->funcs->one_to_one_unmap)
895 mmu->funcs->one_to_one_unmap(mmu, data->splash_base,
896 data->splash_size);
897
898 return rc;
899}
900
/**
 * sde_kms_prepare_commit - msm_kms hook run before an atomic commit
 * @kms: pointer to msm_kms
 * @state: atomic state being committed
 *
 * Votes the SDE power resource on (released later in the commit flow),
 * notifies every encoder attached to a crtc in @state, then performs any
 * required secure-transition preparation.
 */
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, rc = 0;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
			true);
	if (rc) {
		SDE_ERROR("failed to enable power resource %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		return;
	}

	/* let each encoder on an affected crtc prepare for the commit */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				head) {
			if (encoder->crtc != crtc)
				continue;

			sde_encoder_prepare_commit(encoder);
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing preparation for secure
	 * transitions prepare below if any transitions is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);
}
946
/**
 * sde_kms_commit - msm_kms hook: kick off the programmed frame on every
 *	active crtc
 * @kms: pointer to msm_kms
 * @old_state: atomic state being swapped out
 *
 * Requires the power resource to already be enabled (voted in
 * sde_kms_prepare_commit()); bails out loudly if it is not.
 */
static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc));
			sde_crtc_commit_kickoff(crtc, old_crtc_state);
		}
	}
}
971
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -0800972static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
973 struct drm_atomic_state *old_state)
974{
975 struct drm_crtc *crtc;
976 struct drm_crtc_state *crtc_state;
977 bool primary_crtc_active = false;
978 struct msm_drm_private *priv;
979 int i, rc = 0;
980
981 priv = sde_kms->dev->dev_private;
982
983 if (!sde_kms->splash_data.resource_handoff_pending)
984 return;
985
986 SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
987 for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
988 if (crtc->state->active)
989 primary_crtc_active = true;
990 SDE_EVT32(crtc->base.id, crtc->state->active);
991 }
992
993 if (!primary_crtc_active) {
994 SDE_EVT32(SDE_EVTLOG_FUNC_CASE2);
995 return;
996 }
997
998 sde_kms->splash_data.resource_handoff_pending = false;
999
1000 if (sde_kms->splash_data.cont_splash_en) {
1001 SDE_DEBUG("disabling cont_splash feature\n");
1002 sde_kms->splash_data.cont_splash_en = false;
1003
1004 for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
1005 sde_power_data_bus_set_quota(&priv->phandle,
1006 sde_kms->core_client,
1007 SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
1008 SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
1009 SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
1010
1011 sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
1012 false);
1013 }
1014
1015 if (sde_kms->splash_data.splash_base) {
1016 _sde_kms_splash_smmu_unmap(sde_kms);
1017
1018 rc = _sde_kms_release_splash_buffer(
1019 sde_kms->splash_data.splash_base,
1020 sde_kms->splash_data.splash_size);
1021 if (rc)
1022 pr_err("failed to release splash memory\n");
1023 sde_kms->splash_data.splash_base = 0;
1024 sde_kms->splash_data.splash_size = 0;
1025 }
1026}
1027
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001028static void sde_kms_complete_commit(struct msm_kms *kms,
1029 struct drm_atomic_state *old_state)
1030{
1031 struct sde_kms *sde_kms;
1032 struct msm_drm_private *priv;
1033 struct drm_crtc *crtc;
1034 struct drm_crtc_state *old_crtc_state;
Raviteja Tamatam68892de2017-06-20 04:47:19 +05301035 struct drm_connector *connector;
1036 struct drm_connector_state *old_conn_state;
1037 int i, rc = 0;
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001038
1039 if (!kms || !old_state)
1040 return;
1041 sde_kms = to_sde_kms(kms);
1042
1043 if (!sde_kms->dev || !sde_kms->dev->dev_private)
1044 return;
1045 priv = sde_kms->dev->dev_private;
1046
Alan Kwong1124f1f2017-11-10 18:14:39 -05001047 if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
1048 SDE_ERROR("power resource is not enabled\n");
1049 return;
1050 }
1051
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -08001052 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001053 sde_crtc_complete_commit(crtc, old_crtc_state);
1054
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -08001055 /* complete secure transitions if any */
1056 if (sde_kms->smmu_state.transition_type == POST_COMMIT)
1057 _sde_kms_secure_ctrl(sde_kms, crtc, true);
1058 }
1059
Raviteja Tamatam68892de2017-06-20 04:47:19 +05301060 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
1061 struct sde_connector *c_conn;
1062
1063 c_conn = to_sde_connector(connector);
1064 if (!c_conn->ops.post_kickoff)
1065 continue;
1066 rc = c_conn->ops.post_kickoff(connector);
1067 if (rc) {
1068 pr_err("Connector Post kickoff failed rc=%d\n",
1069 rc);
1070 }
1071 }
1072
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001073 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
1074
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08001075 _sde_kms_release_splash_resource(sde_kms, old_state);
1076
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001077 SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
1078}
1079
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001080static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
Abhijit Kulkarni40e38162016-06-26 22:12:09 -04001081 struct drm_crtc *crtc)
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001082{
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001083 struct drm_encoder *encoder;
Narendra Muppallaec11a0a2017-06-15 15:35:17 -07001084 struct drm_device *dev;
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001085 int ret;
1086
Alan Kwongf34ef982016-09-29 20:53:53 -04001087 if (!kms || !crtc || !crtc->state) {
1088 SDE_ERROR("invalid params\n");
1089 return;
1090 }
1091
Narendra Muppallaec11a0a2017-06-15 15:35:17 -07001092 dev = crtc->dev;
1093
Alan Kwongf34ef982016-09-29 20:53:53 -04001094 if (!crtc->state->enable) {
1095 SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
1096 return;
1097 }
1098
1099 if (!crtc->state->active) {
1100 SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
1101 return;
1102 }
1103
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001104 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1105 if (encoder->crtc != crtc)
1106 continue;
1107 /*
Dhaval Patel6c666622017-03-21 23:02:59 -07001108 * Wait for post-flush if necessary to delay before
1109 * plane_cleanup. For example, wait for vsync in case of video
1110 * mode panels. This may be a no-op for command mode panels.
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001111 */
Dhaval Patel6c666622017-03-21 23:02:59 -07001112 SDE_EVT32_VERBOSE(DRMID(crtc));
Jeykumar Sankarandfaeec92017-06-06 15:21:51 -07001113 ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001114 if (ret && ret != -EWOULDBLOCK) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04001115 SDE_ERROR("wait for commit done returned %d\n", ret);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001116 break;
1117 }
1118 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001119}
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001120
Clarence Ip24f80662016-06-13 19:05:32 -04001121static void sde_kms_prepare_fence(struct msm_kms *kms,
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001122 struct drm_atomic_state *old_state)
Clarence Ip24f80662016-06-13 19:05:32 -04001123{
1124 struct drm_crtc *crtc;
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001125 struct drm_crtc_state *old_crtc_state;
1126 int i, rc;
Clarence Ip24f80662016-06-13 19:05:32 -04001127
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001128 if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
1129 SDE_ERROR("invalid argument(s)\n");
1130 return;
1131 }
1132
1133retry:
1134 /* attempt to acquire ww mutex for connection */
1135 rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
1136 old_state->acquire_ctx);
1137
1138 if (rc == -EDEADLK) {
1139 drm_modeset_backoff(old_state->acquire_ctx);
1140 goto retry;
1141 }
1142
1143 /* old_state actually contains updated crtc pointers */
Veera Sundaram Sankaran675ff622017-06-21 21:44:46 -07001144 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1145 if (crtc->state->active)
1146 sde_crtc_prepare_commit(crtc, old_crtc_state);
1147 }
Clarence Ip24f80662016-06-13 19:05:32 -04001148}
1149
Clarence Ip3649f8b2016-10-31 09:59:44 -04001150/**
1151 * _sde_kms_get_displays - query for underlying display handles and cache them
1152 * @sde_kms: Pointer to sde kms structure
1153 * Returns: Zero on success
1154 */
1155static int _sde_kms_get_displays(struct sde_kms *sde_kms)
1156{
1157 int rc = -ENOMEM;
1158
1159 if (!sde_kms) {
1160 SDE_ERROR("invalid sde kms\n");
1161 return -EINVAL;
1162 }
1163
1164 /* dsi */
1165 sde_kms->dsi_displays = NULL;
1166 sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
1167 if (sde_kms->dsi_display_count) {
1168 sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
1169 sizeof(void *),
1170 GFP_KERNEL);
1171 if (!sde_kms->dsi_displays) {
1172 SDE_ERROR("failed to allocate dsi displays\n");
1173 goto exit_deinit_dsi;
1174 }
1175 sde_kms->dsi_display_count =
1176 dsi_display_get_active_displays(sde_kms->dsi_displays,
1177 sde_kms->dsi_display_count);
1178 }
1179
1180 /* wb */
1181 sde_kms->wb_displays = NULL;
1182 sde_kms->wb_display_count = sde_wb_get_num_of_displays();
1183 if (sde_kms->wb_display_count) {
1184 sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
1185 sizeof(void *),
1186 GFP_KERNEL);
1187 if (!sde_kms->wb_displays) {
1188 SDE_ERROR("failed to allocate wb displays\n");
1189 goto exit_deinit_wb;
1190 }
1191 sde_kms->wb_display_count =
1192 wb_display_get_displays(sde_kms->wb_displays,
1193 sde_kms->wb_display_count);
1194 }
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001195
1196 /* dp */
1197 sde_kms->dp_displays = NULL;
1198 sde_kms->dp_display_count = dp_display_get_num_of_displays();
1199 if (sde_kms->dp_display_count) {
1200 sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
1201 sizeof(void *), GFP_KERNEL);
1202 if (!sde_kms->dp_displays) {
1203 SDE_ERROR("failed to allocate dp displays\n");
1204 goto exit_deinit_dp;
1205 }
1206 sde_kms->dp_display_count =
1207 dp_display_get_displays(sde_kms->dp_displays,
1208 sde_kms->dp_display_count);
1209 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001210 return 0;
1211
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001212exit_deinit_dp:
1213 kfree(sde_kms->dp_displays);
1214 sde_kms->dp_display_count = 0;
1215 sde_kms->dp_displays = NULL;
1216
Clarence Ip3649f8b2016-10-31 09:59:44 -04001217exit_deinit_wb:
1218 kfree(sde_kms->wb_displays);
1219 sde_kms->wb_display_count = 0;
1220 sde_kms->wb_displays = NULL;
1221
1222exit_deinit_dsi:
1223 kfree(sde_kms->dsi_displays);
1224 sde_kms->dsi_display_count = 0;
1225 sde_kms->dsi_displays = NULL;
1226 return rc;
1227}
1228
1229/**
1230 * _sde_kms_release_displays - release cache of underlying display handles
1231 * @sde_kms: Pointer to sde kms structure
1232 */
1233static void _sde_kms_release_displays(struct sde_kms *sde_kms)
1234{
1235 if (!sde_kms) {
1236 SDE_ERROR("invalid sde kms\n");
1237 return;
1238 }
1239
1240 kfree(sde_kms->wb_displays);
1241 sde_kms->wb_displays = NULL;
1242 sde_kms->wb_display_count = 0;
1243
1244 kfree(sde_kms->dsi_displays);
1245 sde_kms->dsi_displays = NULL;
1246 sde_kms->dsi_display_count = 0;
1247}
1248
1249/**
1250 * _sde_kms_setup_displays - create encoders, bridges and connectors
1251 * for underlying displays
1252 * @dev: Pointer to drm device structure
1253 * @priv: Pointer to private drm device data
1254 * @sde_kms: Pointer to sde kms structure
1255 * Returns: Zero on success
1256 */
1257static int _sde_kms_setup_displays(struct drm_device *dev,
1258 struct msm_drm_private *priv,
1259 struct sde_kms *sde_kms)
1260{
1261 static const struct sde_connector_ops dsi_ops = {
Alan Kwong769fba92017-11-13 16:50:36 -05001262 .set_info_blob = dsi_conn_set_info_blob,
Clarence Ip3649f8b2016-10-31 09:59:44 -04001263 .detect = dsi_conn_detect,
1264 .get_modes = dsi_connector_get_modes,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -07001265 .put_modes = dsi_connector_put_modes,
Clarence Ip3649f8b2016-10-31 09:59:44 -04001266 .mode_valid = dsi_conn_mode_valid,
1267 .get_info = dsi_display_get_info,
Lloyd Atkinson8c49c582016-11-18 14:23:54 -05001268 .set_backlight = dsi_display_set_backlight,
Lloyd Atkinson05d75512017-01-17 14:45:51 -05001269 .soft_reset = dsi_display_soft_reset,
Veera Sundaram Sankaranbb2bf9a2017-03-29 18:56:47 -07001270 .pre_kickoff = dsi_conn_pre_kickoff,
Jeykumar Sankaran2b098072017-03-16 17:25:59 -07001271 .clk_ctrl = dsi_display_clk_ctrl,
Clarence Ipd57b0622017-07-10 11:28:57 -04001272 .set_power = dsi_display_set_power,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -07001273 .get_mode_info = dsi_conn_get_mode_info,
1274 .get_dst_format = dsi_display_get_dst_format,
Sandeep Panda98d6ab22017-09-05 08:03:16 +05301275 .post_kickoff = dsi_conn_post_kickoff,
1276 .check_status = dsi_display_check_status,
Govinda Rajulu Chennab95b9c32017-10-13 15:00:32 -04001277 .enable_event = dsi_conn_enable_event,
1278 .cmd_transfer = dsi_display_cmd_transfer,
Sandeep Panda8693e8f2018-03-08 08:16:44 +05301279 .cont_splash_config = dsi_display_cont_splash_config,
Kalyan Thota6a9f3b72018-01-18 18:00:02 +05301280 .get_panel_vfp = dsi_display_get_panel_vfp,
Clarence Ip3649f8b2016-10-31 09:59:44 -04001281 };
1282 static const struct sde_connector_ops wb_ops = {
1283 .post_init = sde_wb_connector_post_init,
Alan Kwong769fba92017-11-13 16:50:36 -05001284 .set_info_blob = sde_wb_connector_set_info_blob,
Clarence Ip3649f8b2016-10-31 09:59:44 -04001285 .detect = sde_wb_connector_detect,
1286 .get_modes = sde_wb_connector_get_modes,
1287 .set_property = sde_wb_connector_set_property,
1288 .get_info = sde_wb_get_info,
Jeykumar Sankaran2b098072017-03-16 17:25:59 -07001289 .soft_reset = NULL,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -07001290 .get_mode_info = sde_wb_get_mode_info,
Sandeep Panda98d6ab22017-09-05 08:03:16 +05301291 .get_dst_format = NULL,
1292 .check_status = NULL,
Govinda Rajulu Chennab95b9c32017-10-13 15:00:32 -04001293 .cmd_transfer = NULL,
Sandeep Panda8693e8f2018-03-08 08:16:44 +05301294 .cont_splash_config = NULL,
Kalyan Thota6a9f3b72018-01-18 18:00:02 +05301295 .get_panel_vfp = NULL,
Clarence Ip3649f8b2016-10-31 09:59:44 -04001296 };
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001297 static const struct sde_connector_ops dp_ops = {
1298 .post_init = dp_connector_post_init,
1299 .detect = dp_connector_detect,
1300 .get_modes = dp_connector_get_modes,
1301 .mode_valid = dp_connector_mode_valid,
1302 .get_info = dp_connector_get_info,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -07001303 .get_mode_info = dp_connector_get_mode_info,
Ajay Singh Parmar315e5852017-11-23 21:47:32 -08001304 .post_open = dp_connector_post_open,
Sandeep Panda98d6ab22017-09-05 08:03:16 +05301305 .check_status = NULL,
Ajay Singh Parmar87af50b2017-12-22 22:22:55 -08001306 .config_hdr = dp_connector_config_hdr,
Govinda Rajulu Chennab95b9c32017-10-13 15:00:32 -04001307 .cmd_transfer = NULL,
Sandeep Panda8693e8f2018-03-08 08:16:44 +05301308 .cont_splash_config = NULL,
Kalyan Thota6a9f3b72018-01-18 18:00:02 +05301309 .get_panel_vfp = NULL,
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001310 };
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001311 static const struct sde_connector_ops ext_bridge_ops = {
1312 .set_info_blob = dsi_conn_set_info_blob,
1313 .mode_valid = dsi_conn_mode_valid,
1314 .get_info = dsi_display_ext_bridge_get_info,
1315 .soft_reset = dsi_display_soft_reset,
1316 .clk_ctrl = dsi_display_clk_ctrl,
1317 .get_mode_info = dsi_conn_ext_bridge_get_mode_info,
1318 .get_dst_format = dsi_display_get_dst_format,
1319 .enable_event = dsi_conn_enable_event,
1320 .cmd_transfer = NULL,
Sandeep Panda8693e8f2018-03-08 08:16:44 +05301321 .cont_splash_config = NULL,
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001322 };
Clarence Ip3649f8b2016-10-31 09:59:44 -04001323 struct msm_display_info info;
1324 struct drm_encoder *encoder;
1325 void *display, *connector;
1326 int i, max_encoders;
1327 int rc = 0;
1328
1329 if (!dev || !priv || !sde_kms) {
1330 SDE_ERROR("invalid argument(s)\n");
1331 return -EINVAL;
1332 }
1333
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001334 max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
1335 sde_kms->dp_display_count;
Clarence Ip3649f8b2016-10-31 09:59:44 -04001336 if (max_encoders > ARRAY_SIZE(priv->encoders)) {
1337 max_encoders = ARRAY_SIZE(priv->encoders);
1338 SDE_ERROR("capping number of displays to %d", max_encoders);
1339 }
1340
1341 /* dsi */
1342 for (i = 0; i < sde_kms->dsi_display_count &&
1343 priv->num_encoders < max_encoders; ++i) {
1344 display = sde_kms->dsi_displays[i];
1345 encoder = NULL;
1346
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001347 if (!dsi_display_has_ext_bridge(display)) {
1348 memset(&info, 0x0, sizeof(info));
1349 rc = dsi_display_get_info(&info, display);
1350 if (rc) {
1351 SDE_ERROR("dsi get_info %d failed\n", i);
1352 continue;
1353 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001354
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001355 encoder = sde_encoder_init(dev, &info);
1356 if (IS_ERR_OR_NULL(encoder)) {
1357 SDE_ERROR("encoder init failed for dsi %d\n",
1358 i);
1359 continue;
1360 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001361
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001362 rc = dsi_display_drm_bridge_init(display, encoder);
1363 if (rc) {
1364 SDE_ERROR("dsi bridge %d init failed, %d\n",
1365 i, rc);
1366 sde_encoder_destroy(encoder);
1367 continue;
1368 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001369
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001370 connector = sde_connector_init(dev,
1371 encoder,
1372 NULL,
1373 display,
1374 &dsi_ops,
1375 DRM_CONNECTOR_POLL_HPD,
1376 DRM_MODE_CONNECTOR_DSI);
1377 if (connector) {
1378 priv->encoders[priv->num_encoders++] = encoder;
1379 } else {
1380 SDE_ERROR("dsi %d connector init failed\n", i);
1381 dsi_display_drm_bridge_deinit(display);
1382 sde_encoder_destroy(encoder);
1383 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001384 } else {
Ray Zhanga8a5dfe2018-01-22 10:29:01 +08001385 memset(&info, 0x0, sizeof(info));
1386 rc = dsi_display_ext_bridge_get_info(&info, display);
1387 if (rc) {
1388 SDE_ERROR("ext get_info %d failed\n", i);
1389 continue;
1390 }
1391
1392 encoder = sde_encoder_init(dev, &info);
1393 if (IS_ERR_OR_NULL(encoder)) {
1394 SDE_ERROR("encoder init failed for ext %d\n",
1395 i);
1396 continue;
1397 }
1398
1399 rc = dsi_display_drm_bridge_init(display, encoder);
1400 if (rc) {
1401 SDE_ERROR("dsi bridge %d init failed for ext\n",
1402 i);
1403 sde_encoder_destroy(encoder);
1404 continue;
1405 }
1406
1407 connector = sde_connector_init(dev,
1408 encoder,
1409 NULL,
1410 display,
1411 &ext_bridge_ops,
1412 DRM_CONNECTOR_POLL_HPD,
1413 DRM_MODE_CONNECTOR_DSI);
1414 if (connector) {
1415 priv->encoders[priv->num_encoders++] = encoder;
1416 } else {
1417 SDE_ERROR("connector init %d failed for ext\n",
1418 i);
1419 dsi_display_drm_bridge_deinit(display);
1420 sde_encoder_destroy(encoder);
1421 continue;
1422 }
1423
1424 rc = dsi_display_drm_ext_bridge_init(display,
1425 encoder, connector);
1426 if (rc) {
1427 struct drm_connector *conn = connector;
1428
1429 SDE_ERROR("ext bridge %d init failed, %d\n",
1430 i, rc);
1431 conn->funcs->destroy(connector);
1432 dsi_display_drm_bridge_deinit(display);
1433 sde_encoder_destroy(encoder);
1434 continue;
1435 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001436 }
1437 }
1438
1439 /* wb */
1440 for (i = 0; i < sde_kms->wb_display_count &&
1441 priv->num_encoders < max_encoders; ++i) {
1442 display = sde_kms->wb_displays[i];
1443 encoder = NULL;
1444
1445 memset(&info, 0x0, sizeof(info));
1446 rc = sde_wb_get_info(&info, display);
1447 if (rc) {
1448 SDE_ERROR("wb get_info %d failed\n", i);
1449 continue;
1450 }
1451
1452 encoder = sde_encoder_init(dev, &info);
1453 if (IS_ERR_OR_NULL(encoder)) {
1454 SDE_ERROR("encoder init failed for wb %d\n", i);
1455 continue;
1456 }
1457
1458 rc = sde_wb_drm_init(display, encoder);
1459 if (rc) {
1460 SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
1461 sde_encoder_destroy(encoder);
1462 continue;
1463 }
1464
1465 connector = sde_connector_init(dev,
1466 encoder,
1467 0,
1468 display,
1469 &wb_ops,
1470 DRM_CONNECTOR_POLL_HPD,
1471 DRM_MODE_CONNECTOR_VIRTUAL);
1472 if (connector) {
1473 priv->encoders[priv->num_encoders++] = encoder;
1474 } else {
1475 SDE_ERROR("wb %d connector init failed\n", i);
1476 sde_wb_drm_deinit(display);
1477 sde_encoder_destroy(encoder);
1478 }
1479 }
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001480 /* dp */
1481 for (i = 0; i < sde_kms->dp_display_count &&
1482 priv->num_encoders < max_encoders; ++i) {
1483 display = sde_kms->dp_displays[i];
1484 encoder = NULL;
1485
1486 memset(&info, 0x0, sizeof(info));
1487 rc = dp_connector_get_info(&info, display);
1488 if (rc) {
1489 SDE_ERROR("dp get_info %d failed\n", i);
1490 continue;
1491 }
1492
1493 encoder = sde_encoder_init(dev, &info);
1494 if (IS_ERR_OR_NULL(encoder)) {
1495 SDE_ERROR("dp encoder init failed %d\n", i);
1496 continue;
1497 }
1498
1499 rc = dp_drm_bridge_init(display, encoder);
1500 if (rc) {
1501 SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
1502 sde_encoder_destroy(encoder);
1503 continue;
1504 }
1505
1506 connector = sde_connector_init(dev,
1507 encoder,
1508 NULL,
1509 display,
1510 &dp_ops,
1511 DRM_CONNECTOR_POLL_HPD,
1512 DRM_MODE_CONNECTOR_DisplayPort);
1513 if (connector) {
1514 priv->encoders[priv->num_encoders++] = encoder;
1515 } else {
1516 SDE_ERROR("dp %d connector init failed\n", i);
1517 dp_drm_bridge_deinit(display);
1518 sde_encoder_destroy(encoder);
1519 }
1520 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001521
1522 return 0;
1523}
1524
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001525static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
1526{
1527 struct msm_drm_private *priv;
1528 int i;
1529
1530 if (!sde_kms) {
1531 SDE_ERROR("invalid sde_kms\n");
1532 return;
1533 } else if (!sde_kms->dev) {
1534 SDE_ERROR("invalid dev\n");
1535 return;
1536 } else if (!sde_kms->dev->dev_private) {
1537 SDE_ERROR("invalid dev_private\n");
1538 return;
1539 }
1540 priv = sde_kms->dev->dev_private;
1541
1542 for (i = 0; i < priv->num_crtcs; i++)
1543 priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001544 priv->num_crtcs = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001545
1546 for (i = 0; i < priv->num_planes; i++)
1547 priv->planes[i]->funcs->destroy(priv->planes[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001548 priv->num_planes = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001549
1550 for (i = 0; i < priv->num_connectors; i++)
1551 priv->connectors[i]->funcs->destroy(priv->connectors[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001552 priv->num_connectors = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001553
1554 for (i = 0; i < priv->num_encoders; i++)
1555 priv->encoders[i]->funcs->destroy(priv->encoders[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001556 priv->num_encoders = 0;
1557
1558 _sde_kms_release_displays(sde_kms);
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001559}
1560
/*
 * _sde_kms_drm_obj_init - create all drm objects for this kms: the irq
 * domain, display encoders/connectors, planes (including smart-DMA virtual
 * planes) and one crtc per encoder, and wire up the compatibility masks.
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success, negative errno on failure (all created objects
 *	are destroyed via the goto-cleanup path before returning).
 */
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;

	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;

	/* per-priority SSPP/plane ids collected for virtual plane creation */
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	/* one crtc per encoder, limited by available mixers */
	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		/* cursor pipes and overflow pipes become non-primary */
		bool primary = true;

		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		/* record multirect-capable pipes, indexed by priority */
		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
			sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
			(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	/* cannot create more crtcs than primary planes were produced */
	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
1670
Alan Kwong4dd64c82017-02-04 18:41:51 -08001671/**
Dhaval Patel2a3c37a2017-10-25 12:30:36 -07001672 * sde_kms_timeline_status - provides current timeline status
1673 * This API should be called without mode config lock.
1674 * @dev: Pointer to drm device
1675 */
1676void sde_kms_timeline_status(struct drm_device *dev)
1677{
1678 struct drm_crtc *crtc;
1679 struct drm_connector *conn;
1680
1681 if (!dev) {
1682 SDE_ERROR("invalid drm device node\n");
1683 return;
1684 }
1685
1686 drm_for_each_crtc(crtc, dev)
1687 sde_crtc_timeline_status(crtc);
1688
1689 mutex_lock(&dev->mode_config.mutex);
1690 drm_for_each_connector(conn, dev)
1691 sde_conn_timeline_status(conn);
1692 mutex_unlock(&dev->mode_config.mutex);
1693}
1694
1695/**
Alan Kwong4dd64c82017-02-04 18:41:51 -08001696 * struct sde_kms_fbo_fb - framebuffer creation list
1697 * @list: list of framebuffer attached to framebuffer object
1698 * @fb: Pointer to framebuffer attached to framebuffer object
1699 */
1700struct sde_kms_fbo_fb {
1701 struct list_head list;
1702 struct drm_framebuffer *fb;
1703};
1704
/*
 * sde_kms_fbo_create_fb - create a drm framebuffer backed by the given
 * framebuffer object and register it on the fbo's fb_list.
 * @dev: Pointer to drm device
 * @fbo: Framebuffer object supplying format, geometry and backing GEM bos
 * Returns: new framebuffer pointer on success, NULL on failure
 *
 * The returned fb holds one GEM reference per plane bo and the fb_list
 * entry holds an additional framebuffer reference; both are dropped in
 * sde_kms_fbo_destroy().
 */
struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
		struct sde_kms_fbo *fbo)
{
	struct drm_framebuffer *fb = NULL;
	struct sde_kms_fbo_fb *fbo_fb;
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	u32 base_offset = 0;
	int i, ret;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return NULL;
	}

	fbo_fb = kzalloc(sizeof(struct sde_kms_fbo_fb), GFP_KERNEL);
	if (!fbo_fb)
		return NULL;

	/* translate the fbo layout into a mode_fb_cmd2 description */
	mode_cmd.pixel_format = fbo->pixel_format;
	mode_cmd.width = fbo->width;
	mode_cmd.height = fbo->height;
	mode_cmd.flags = fbo->flags;

	/* planes are packed back to back within the buffer */
	for (i = 0; i < fbo->nplane; i++) {
		mode_cmd.offsets[i] = base_offset;
		mode_cmd.pitches[i] = fbo->layout.plane_pitch[i];
		mode_cmd.modifier[i] = fbo->modifier[i];
		base_offset += fbo->layout.plane_size[i];
		SDE_DEBUG("offset[%d]:%x\n", i, mode_cmd.offsets[i]);
	}

	fb = msm_framebuffer_init(dev, &mode_cmd, fbo->bo);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		fb = NULL;
		SDE_ERROR("failed to allocate fb %d\n", ret);
		goto fail;
	}

	/* need to take one reference for gem object */
	for (i = 0; i < fbo->nplane; i++)
		drm_gem_object_reference(fbo->bo[i]);

	SDE_DEBUG("register private fb:%d\n", fb->base.id);

	INIT_LIST_HEAD(&fbo_fb->list);
	fbo_fb->fb = fb;
	/* list entry keeps its own reference on the framebuffer */
	drm_framebuffer_reference(fbo_fb->fb);
	list_add_tail(&fbo_fb->list, &fbo->fb_list);

	return fb;

fail:
	kfree(fbo_fb);
	return NULL;
}
1761
/*
 * sde_kms_fbo_destroy - release everything a framebuffer object owns:
 * registered framebuffers, per-plane GEM references, the attached dma-buf
 * and the ion handle backing the allocation.
 * @fbo: Framebuffer object to tear down
 */
static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct sde_kms_fbo_fb *curr, *next;
	int i;

	if (!fbo) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	dev = fbo->dev;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	priv = dev->dev_private;

	if (!priv->kms) {
		SDE_ERROR("invalid kms handle\n");
		return;
	}
	sde_kms = to_sde_kms(priv->kms);

	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", fbo->width, fbo->height,
			fbo->pixel_format >> 0, fbo->pixel_format >> 8,
			fbo->pixel_format >> 16, fbo->pixel_format >> 24,
			fbo->modifier[0], fbo->flags);

	/* drop the references taken when each fb was registered */
	list_for_each_entry_safe(curr, next, &fbo->fb_list, list) {
		SDE_DEBUG("unregister private fb:%d\n", curr->fb->base.id);
		drm_framebuffer_unregister_private(curr->fb);
		drm_framebuffer_unreference(curr->fb);
		list_del(&curr->list);
		kfree(curr);
	}

	/* release the per-plane GEM references under struct_mutex */
	for (i = 0; i < fbo->layout.num_planes; i++) {
		if (fbo->bo[i]) {
			mutex_lock(&dev->struct_mutex);
			drm_gem_object_unreference(fbo->bo[i]);
			mutex_unlock(&dev->struct_mutex);
			fbo->bo[i] = NULL;
		}
	}

	if (fbo->dma_buf) {
		dma_buf_put(fbo->dma_buf);
		fbo->dma_buf = NULL;
	}

	/* free the backing ion allocation, if one was made */
	if (sde_kms->iclient && fbo->ihandle) {
		ion_free(sde_kms->iclient, fbo->ihandle);
		fbo->ihandle = NULL;
	}
}
1820
Clarence Ipd02440b2017-05-21 18:10:01 -04001821static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
1822 uint32_t flags)
1823{
1824 if (msm_obj)
1825 msm_obj->flags |= flags;
1826}
1827
Alan Kwong4dd64c82017-02-04 18:41:51 -08001828struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
1829 u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
1830{
1831 struct msm_drm_private *priv;
1832 struct sde_kms *sde_kms;
1833 struct sde_kms_fbo *fbo;
1834 int i, ret;
1835
1836 if (!dev || !dev->dev_private) {
1837 SDE_ERROR("invalid drm device node\n");
1838 return NULL;
1839 }
1840 priv = dev->dev_private;
1841
1842 if (!priv->kms) {
1843 SDE_ERROR("invalid kms handle\n");
1844 return NULL;
1845 }
1846 sde_kms = to_sde_kms(priv->kms);
1847
1848 SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", width, height,
1849 pixel_format >> 0, pixel_format >> 8,
1850 pixel_format >> 16, pixel_format >> 24,
1851 modifier[0], flags);
1852
1853 fbo = kzalloc(sizeof(struct sde_kms_fbo), GFP_KERNEL);
1854 if (!fbo)
1855 return NULL;
1856
1857 atomic_set(&fbo->refcount, 0);
1858 INIT_LIST_HEAD(&fbo->fb_list);
1859 fbo->dev = dev;
1860 fbo->width = width;
1861 fbo->height = height;
1862 fbo->pixel_format = pixel_format;
1863 fbo->flags = flags;
1864 for (i = 0; i < ARRAY_SIZE(fbo->modifier); i++)
1865 fbo->modifier[i] = modifier[i];
1866 fbo->nplane = drm_format_num_planes(fbo->pixel_format);
1867 fbo->fmt = sde_get_sde_format_ext(fbo->pixel_format, fbo->modifier,
1868 fbo->nplane);
1869 if (!fbo->fmt) {
1870 ret = -EINVAL;
1871 SDE_ERROR("failed to find pixel format\n");
1872 goto done;
1873 }
1874
1875 ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
Narendra Muppalla58a64e22017-07-24 10:54:47 -07001876 &fbo->layout, fbo->layout.plane_pitch);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001877 if (ret) {
1878 SDE_ERROR("failed to get plane sizes\n");
1879 goto done;
1880 }
1881
1882 /* allocate backing buffer object */
Alan Kwong54125bb2017-02-26 16:01:36 -08001883 if (sde_kms->iclient) {
1884 u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
Alan Kwong3f2a5152017-08-25 16:19:43 -04001885 ION_HEAP(ION_SECURE_HEAP_ID) :
Alan Kwong54125bb2017-02-26 16:01:36 -08001886 ION_HEAP(ION_SYSTEM_HEAP_ID);
Alan Kwong3f2a5152017-08-25 16:19:43 -04001887 u32 iflags = fbo->flags & DRM_MODE_FB_SECURE ?
1888 (ION_FLAG_SECURE | ION_FLAG_CP_PIXEL) : 0;
Alan Kwong54125bb2017-02-26 16:01:36 -08001889
1890 fbo->ihandle = ion_alloc(sde_kms->iclient,
Alan Kwong3f2a5152017-08-25 16:19:43 -04001891 fbo->layout.total_size, SZ_4K, heap_id, iflags);
Alan Kwong54125bb2017-02-26 16:01:36 -08001892 if (IS_ERR_OR_NULL(fbo->ihandle)) {
1893 SDE_ERROR("failed to alloc ion memory\n");
1894 ret = PTR_ERR(fbo->ihandle);
1895 fbo->ihandle = NULL;
1896 goto done;
1897 }
1898
1899 fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
1900 fbo->ihandle);
1901 if (IS_ERR(fbo->dma_buf)) {
1902 SDE_ERROR("failed to share ion memory\n");
1903 ret = -ENOMEM;
1904 fbo->dma_buf = NULL;
1905 goto done;
1906 }
1907
1908 fbo->bo[0] = dev->driver->gem_prime_import(dev,
1909 fbo->dma_buf);
1910 if (IS_ERR(fbo->bo[0])) {
1911 SDE_ERROR("failed to import ion memory\n");
1912 ret = PTR_ERR(fbo->bo[0]);
1913 fbo->bo[0] = NULL;
1914 goto done;
1915 }
Clarence Ipd02440b2017-05-21 18:10:01 -04001916
1917 /* insert extra bo flags */
1918 sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001919 } else {
1920 mutex_lock(&dev->struct_mutex);
1921 fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
Clarence Ipd02440b2017-05-21 18:10:01 -04001922 MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001923 if (IS_ERR(fbo->bo[0])) {
1924 mutex_unlock(&dev->struct_mutex);
1925 SDE_ERROR("failed to new gem buffer\n");
1926 ret = PTR_ERR(fbo->bo[0]);
1927 fbo->bo[0] = NULL;
1928 goto done;
1929 }
Alan Kwong4dd64c82017-02-04 18:41:51 -08001930 mutex_unlock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001931 }
1932
Alan Kwong54125bb2017-02-26 16:01:36 -08001933 mutex_lock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001934 for (i = 1; i < fbo->layout.num_planes; i++) {
1935 fbo->bo[i] = fbo->bo[0];
1936 drm_gem_object_reference(fbo->bo[i]);
1937 }
1938 mutex_unlock(&dev->struct_mutex);
1939
1940done:
1941 if (ret) {
1942 sde_kms_fbo_destroy(fbo);
1943 kfree(fbo);
1944 fbo = NULL;
1945 } else {
1946 sde_kms_fbo_reference(fbo);
1947 }
1948
1949 return fbo;
1950}
1951
1952int sde_kms_fbo_reference(struct sde_kms_fbo *fbo)
1953{
1954 if (!fbo) {
1955 SDE_ERROR("invalid parameters\n");
1956 return -EINVAL;
1957 }
1958
1959 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1960 atomic_read(&fbo->refcount));
1961
1962 atomic_inc(&fbo->refcount);
1963
1964 return 0;
1965}
1966
1967void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo)
1968{
1969 if (!fbo) {
1970 SDE_ERROR("invalid parameters\n");
1971 return;
1972 }
1973
1974 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1975 atomic_read(&fbo->refcount));
1976
1977 if (!atomic_read(&fbo->refcount)) {
1978 SDE_ERROR("invalid refcount\n");
1979 return;
1980 } else if (atomic_dec_return(&fbo->refcount) == 0) {
1981 sde_kms_fbo_destroy(fbo);
1982 }
1983}
1984
Alan Kwong5a3ac752016-10-16 01:02:35 -04001985static int sde_kms_postinit(struct msm_kms *kms)
1986{
1987 struct sde_kms *sde_kms = to_sde_kms(kms);
1988 struct drm_device *dev;
Dhaval Patel91399a52017-11-27 22:21:27 -08001989 struct drm_crtc *crtc;
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07001990 int rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04001991
1992 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
1993 SDE_ERROR("invalid sde_kms\n");
1994 return -EINVAL;
1995 }
1996
1997 dev = sde_kms->dev;
1998
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07001999 rc = _sde_debugfs_init(sde_kms);
2000 if (rc)
2001 SDE_ERROR("sde_debugfs init failed: %d\n", rc);
2002
Dhaval Patel91399a52017-11-27 22:21:27 -08002003 drm_for_each_crtc(crtc, dev)
2004 sde_crtc_post_init(dev, crtc);
2005
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07002006 return rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04002007}
2008
/*
 * sde_kms_round_pixclk - kms hook to round a requested pixel clock rate.
 * SDE imposes no rounding constraint, so the rate is returned unchanged.
 */
static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
2014
/**
 * _sde_kms_hw_destroy - release hardware blocks, mappings and clients
 * @sde_kms: kms instance being torn down
 * @pdev: platform device that owns the register mappings
 *
 * Teardown runs in reverse order of initialization. Every step is guarded
 * and pointers are cleared after release, so the function tolerates a
 * partially initialized kms and repeated calls during shutdown.
 */
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	/* unregister the genpd provider before the hardware goes away */
	if (sde_kms->genpd_init) {
		sde_kms->genpd_init = false;
		pm_genpd_remove(&sde_kms->genpd);
		of_genpd_del_provider(pdev->dev.of_node);
	}

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);
	/* best effort: the splash buffer may already have been handed back */
	(void)_sde_kms_release_splash_buffer(
				sde_kms->splash_data.splash_base,
				sde_kms->splash_data.splash_size);

	/* safe to call these more than once during shutdown */
	_sde_debugfs_destroy(sde_kms);
	_sde_kms_mmu_destroy(sde_kms);

	if (sde_kms->iclient) {
		ion_client_destroy(sde_kms->iclient);
		sde_kms->iclient = NULL;
	}

	/* vbif hw blocks are only valid while the catalog is alive */
	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->core_client)
		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
	sde_kms->core_client = NULL;

	/* unmap register regions last; earlier steps may still touch them */
	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
}
2096
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07002097int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
2098{
2099 int i;
2100
2101 if (!sde_kms)
2102 return -EINVAL;
2103
2104 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
2105 struct msm_mmu *mmu;
2106 struct msm_gem_address_space *aspace = sde_kms->aspace[i];
2107
2108 if (!aspace)
2109 continue;
2110
2111 mmu = sde_kms->aspace[i]->mmu;
2112
2113 if (secure_only &&
2114 !aspace->mmu->funcs->is_domain_secure(mmu))
2115 continue;
2116
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07002117 /* cleanup aspace before detaching */
2118 msm_gem_aspace_domain_attach_detach_update(aspace, true);
2119
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07002120 SDE_DEBUG("Detaching domain:%d\n", i);
2121 aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
2122 ARRAY_SIZE(iommu_ports));
2123
2124 aspace->domain_attached = false;
2125 }
2126
2127 return 0;
2128}
2129
2130int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
2131{
2132 int i;
2133
2134 if (!sde_kms)
2135 return -EINVAL;
2136
2137 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
2138 struct msm_mmu *mmu;
2139 struct msm_gem_address_space *aspace = sde_kms->aspace[i];
2140
2141 if (!aspace)
2142 continue;
2143
2144 mmu = sde_kms->aspace[i]->mmu;
2145
2146 if (secure_only &&
2147 !aspace->mmu->funcs->is_domain_secure(mmu))
2148 continue;
2149
2150 SDE_DEBUG("Attaching domain:%d\n", i);
2151 aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
2152 ARRAY_SIZE(iommu_ports));
2153
2154 aspace->domain_attached = true;
Veera Sundaram Sankaranb024ae42018-05-24 10:05:54 -07002155 msm_gem_aspace_domain_attach_detach_update(aspace, false);
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07002156 }
2157
2158 return 0;
2159}
2160
/**
 * sde_kms_destroy - kms hook to destroy the sde kms instance
 * @kms: kms handle embedded in the sde kms structure
 *
 * Releases all hardware resources via _sde_kms_hw_destroy() and then
 * frees the containing sde_kms allocation.
 */
static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
	kfree(sde_kms);
}
2181
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07002182static void _sde_kms_plane_force_remove(struct drm_plane *plane,
2183 struct drm_atomic_state *state)
2184{
2185 struct drm_plane_state *plane_state;
2186 int ret = 0;
2187
2188 if (!plane->crtc)
2189 return;
2190
2191 plane_state = drm_atomic_get_plane_state(state, plane);
2192 if (IS_ERR(plane_state)) {
2193 ret = PTR_ERR(plane_state);
2194 SDE_ERROR("error %d getting plane %d state\n",
2195 ret, plane->base.id);
2196 return;
2197 }
2198
2199 plane->old_fb = plane->fb;
2200
2201 SDE_DEBUG("disabling plane %d\n", plane->base.id);
2202
2203 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2204 if (ret != 0)
2205 SDE_ERROR("error %d disabling plane %d\n", ret,
2206 plane->base.id);
2207}
2208
/**
 * _sde_kms_remove_fbs - release a closing file's framebuffers
 * @sde_kms: kms instance
 * @file: drm file being closed
 * @state: pre-allocated atomic state used to disable affected planes
 *
 * Framebuffers whose only remaining reference belongs to @file are freed
 * directly. Framebuffers still pinned elsewhere (e.g. being scanned out)
 * are set aside, every plane displaying them is disabled through a single
 * atomic commit, and they are released once the commit succeeds.
 *
 * On success ownership of @state passes to the atomic framework (or it is
 * freed here if there was nothing to commit). On failure the caller still
 * owns @state; -EDEADLK asks the caller to back off and retry.
 */
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	int ret = 0;
	u32 plane_mask = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			/* still in use: park it and disable its plane(s) */
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->fb == fb) {
					plane_mask |=
						1 << drm_plane_index(plane);
					_sde_kms_plane_force_remove(
							plane, state);
				}
			}
		} else {
			/* last reference: release immediately */
			list_del_init(&fb->filp_head);
			drm_framebuffer_unreference(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		drm_atomic_state_free(state);
		return 0;
	}

	SDE_DEBUG("committing after removing all the pipes\n");
	ret = drm_atomic_commit(state);

	if (ret) {
		/*
		 * move the fbs back to original list, so it would be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	/* planes are off now; drop the parked framebuffers */
	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_unreference(fb);
	}

end:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	return ret;
}
2272
/**
 * sde_kms_preclose - kms hook run before a drm file is released
 * @kms: kms handle
 * @file: drm file being closed
 *
 * Cancels the file's pending page flips, then force-removes its
 * framebuffers via _sde_kms_remove_fbs(), retrying with legacy backoff
 * while the modeset locks report -EDEADLK.
 */
static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;
	struct drm_atomic_state *state = NULL;
	int ret = 0;

	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto end;
	}

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		ret = _sde_kms_remove_fbs(sde_kms, file, state);
		if (ret != -EDEADLK)
			break;
		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
	}

end:
	/* on success the state was consumed by _sde_kms_remove_fbs() */
	if ((ret != 0) && state)
		drm_atomic_state_free(state);

	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
	drm_modeset_unlock_all(dev);
}
2309
/**
 * _sde_kms_helper_reset_custom_properties - stage resets of SDE properties
 * @sde_kms: kms instance
 * @state: atomic state the property resets are staged into
 *
 * Walks every plane, crtc and connector and stages a reset of the SDE
 * custom properties into @state so a subsequent commit restores default
 * property values (used from lastclose).
 *
 * Returns 0 on success or the first error encountered; -EDEADLK is
 * expected to be handled by the caller with backoff/retry.
 */
static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int ret = 0;

	drm_for_each_plane(plane, dev) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			SDE_ERROR("error %d getting plane %d state\n",
					ret, DRMID(plane));
			return ret;
		}

		ret = sde_plane_helper_reset_custom_properties(plane,
				plane_state);
		if (ret) {
			SDE_ERROR("error %d resetting plane props %d\n",
					ret, DRMID(plane));
			return ret;
		}
	}
	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			SDE_ERROR("error %d getting crtc %d state\n",
					ret, DRMID(crtc));
			return ret;
		}

		ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
		if (ret) {
			SDE_ERROR("error %d resetting crtc props %d\n",
					ret, DRMID(crtc));
			return ret;
		}
	}

	drm_for_each_connector(conn, dev) {
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			return ret;
		}

		ret = sde_connector_helper_reset_custom_properties(conn,
				conn_state);
		if (ret) {
			SDE_ERROR("error %d resetting connector props %d\n",
					ret, DRMID(conn));
			return ret;
		}
	}

	return ret;
}
2376
/**
 * sde_kms_lastclose - kms hook run when the last drm file closes
 * @kms: kms handle
 *
 * Commits an atomic state that resets all SDE custom properties to their
 * defaults, retrying with legacy backoff while the commit reports
 * -EDEADLK. On commit success the framework takes ownership of the state.
 */
static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	int ret, i;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		/* add reset of custom properties to the state */
		ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
		if (ret)
			break;

		ret = drm_atomic_commit(state);
		if (ret != -EDEADLK)
			break;

		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
		SDE_DEBUG("deadlock backoff on attempt %d\n", i);
	}

	if (ret) {
		/**
		 * on success, atomic state object ownership transfers to
		 * framework, otherwise, free it here
		 */
		drm_atomic_state_free(state);
		SDE_ERROR("failed to run last close: %d\n", ret);
	}
}
2422
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002423static int sde_kms_check_secure_transition(struct msm_kms *kms,
2424 struct drm_atomic_state *state)
2425{
2426 struct sde_kms *sde_kms;
2427 struct drm_device *dev;
2428 struct drm_crtc *crtc;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002429 struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002430 struct drm_crtc_state *crtc_state;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002431 int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
2432 bool sec_session = false, global_sec_session = false;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002433 int i;
2434
2435 if (!kms || !state) {
2436 return -EINVAL;
2437 SDE_ERROR("invalid arguments\n");
2438 }
2439
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002440 sde_kms = to_sde_kms(kms);
2441 dev = sde_kms->dev;
2442
2443 /* iterate state object for active secure/non-secure crtc */
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002444 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2445 if (!crtc_state->active)
2446 continue;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002447
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002448 active_crtc_cnt++;
2449 if (sde_crtc_get_secure_level(crtc, crtc_state) ==
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002450 SDE_DRM_SEC_ONLY)
2451 sec_session = true;
2452
2453 cur_crtc = crtc;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002454 }
2455
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002456 /* iterate global list for active and secure crtc */
2457 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002458 if (!crtc->state->active)
2459 continue;
2460
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002461 global_active_crtc_cnt++;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002462 if (sde_crtc_get_secure_level(crtc, crtc->state) ==
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002463 SDE_DRM_SEC_ONLY)
2464 global_sec_session = true;
2465
2466 global_crtc = crtc;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002467 }
2468
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002469 /*
2470 * - fail secure crtc commit, if any other crtc session is already
2471 * in progress
2472 * - fail non-secure crtc commit, if any secure crtc session is already
2473 * in progress
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002474 */
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002475 if (global_sec_session || sec_session) {
2476 if ((global_active_crtc_cnt >
2477 MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
2478 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
2479 SDE_ERROR(
2480 "Secure check failed global_active:%d active:%d\n",
2481 global_active_crtc_cnt, active_crtc_cnt);
2482 return -EPERM;
2483
2484 /*
2485 * As only one crtc is allowed during secure session, the crtc
2486 * in this commit should match with the global crtc, if it
2487 * exists
2488 */
2489 } else if (global_crtc && (global_crtc != cur_crtc)) {
2490 SDE_ERROR(
2491 "crtc%d-sec%d not allowed during crtc%d-sec%d\n",
Veera Sundaram Sankaran4db71f22017-11-16 14:33:10 -08002492 cur_crtc ? cur_crtc->base.id : -1, sec_session,
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002493 global_crtc->base.id, global_sec_session);
2494 return -EPERM;
2495 }
2496
2497 }
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002498
2499 return 0;
2500}
2501
2502static int sde_kms_atomic_check(struct msm_kms *kms,
2503 struct drm_atomic_state *state)
2504{
2505 struct sde_kms *sde_kms;
2506 struct drm_device *dev;
2507 int ret;
2508
2509 if (!kms || !state)
2510 return -EINVAL;
2511
2512 sde_kms = to_sde_kms(kms);
2513 dev = sde_kms->dev;
2514
Clarence Ipd86f6e42017-08-08 18:31:00 -04002515 if (sde_kms_is_suspend_blocked(dev)) {
2516 SDE_DEBUG("suspended, skip atomic_check\n");
2517 return -EBUSY;
2518 }
2519
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002520 ret = drm_atomic_helper_check(dev, state);
2521 if (ret)
2522 return ret;
2523 /*
2524 * Check if any secure transition(moving CRTC between secure and
2525 * non-secure state and vice-versa) is allowed or not. when moving
2526 * to secure state, planes with fb_mode set to dir_translated only can
2527 * be staged on the CRTC, and only one CRTC can be active during
2528 * Secure state
2529 */
2530 return sde_kms_check_secure_transition(kms, state);
2531}
2532
Jordan Croused8e96522017-02-13 10:14:16 -07002533static struct msm_gem_address_space*
2534_sde_kms_get_address_space(struct msm_kms *kms,
2535 unsigned int domain)
2536{
2537 struct sde_kms *sde_kms;
2538
2539 if (!kms) {
2540 SDE_ERROR("invalid kms\n");
2541 return NULL;
2542 }
2543
2544 sde_kms = to_sde_kms(kms);
2545 if (!sde_kms) {
2546 SDE_ERROR("invalid sde_kms\n");
2547 return NULL;
2548 }
2549
2550 if (domain >= MSM_SMMU_DOMAIN_MAX)
2551 return NULL;
2552
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07002553 return (sde_kms->aspace[domain] &&
2554 sde_kms->aspace[domain]->domain_attached) ?
2555 sde_kms->aspace[domain] : NULL;
Jordan Croused8e96522017-02-13 10:14:16 -07002556}
2557
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002558static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
2559{
2560 struct drm_device *dev = NULL;
2561 struct sde_kms *sde_kms = NULL;
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302562 struct drm_connector *connector = NULL;
2563 struct sde_connector *sde_conn = NULL;
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002564
2565 if (!kms) {
2566 SDE_ERROR("invalid kms\n");
2567 return;
2568 }
2569
2570 sde_kms = to_sde_kms(kms);
2571 dev = sde_kms->dev;
2572
2573 if (!dev) {
2574 SDE_ERROR("invalid device\n");
2575 return;
2576 }
2577
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302578 if (!dev->mode_config.poll_enabled)
2579 return;
2580
2581 mutex_lock(&dev->mode_config.mutex);
2582 drm_for_each_connector(connector, dev) {
2583 /* Only handle HPD capable connectors. */
2584 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
2585 continue;
2586
2587 sde_conn = to_sde_connector(connector);
2588
Ajay Singh Parmar315e5852017-11-23 21:47:32 -08002589 if (sde_conn->ops.post_open)
2590 sde_conn->ops.post_open(sde_conn->display);
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302591 }
2592 mutex_unlock(&dev->mode_config.mutex);
2593
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002594}
2595
/**
 * sde_kms_cont_splash_config - adopt the bootloader's splash configuration
 * @kms: kms handle
 *
 * When continuous splash is enabled, seeds the drm crtc/encoder/connector
 * state to match what the bootloader already programmed, so the first
 * user-space commit transitions seamlessly instead of blanking. Currently
 * supports a single DSI display configuration only.
 *
 * Returns 0 on success (or when the feature is disabled), negative error
 * code on failure.
 */
static int sde_kms_cont_splash_config(struct msm_kms *kms)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct list_head *connector_list = NULL;
	struct drm_connector *conn_iter = NULL;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->platformdev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	if (!sde_kms->splash_data.cont_splash_en) {
		DRM_INFO("cont_splash feature not enabled\n");
		return rc;
	}

	/* Currently, we only support one dsi display configuration */
	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}
		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(&info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}
		SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n",
			((info.is_connected) ? "true" : "false"),
			((info.is_primary) ? "true" : "false"));
		break;
	}

	if (!encoder) {
		SDE_ERROR("encoder not initialized\n");
		return -EINVAL;
	}

	/* bind the splash encoder to the first crtc */
	priv = sde_kms->dev->dev_private;
	encoder->crtc = priv->crtcs[0];
	crtc = encoder->crtc;
	SDE_DEBUG("crtc id = %d\n", crtc->base.id);


	mutex_lock(&dev->mode_config.mutex);
	connector_list = &dev->mode_config.connector_list;
	if (connector_list) {
		list_for_each_entry(conn_iter, connector_list, head) {
			/**
			 * SDE_KMS doesn't attach more than one encoder to
			 * a DSI connector. So it is safe to check only with
			 * the first encoder entry. Revisit this logic if we
			 * ever have to support continuous splash for
			 * external displays in MST configuration.
			 */
			if (conn_iter &&
				(conn_iter->encoder_ids[0] == encoder->base.id)) {
				connector = conn_iter;
				break;
			}
		}
	}
	if (!connector) {
		SDE_ERROR("connector not initialized\n");
		mutex_unlock(&dev->mode_config.mutex);
		return -EINVAL;
	}

	/* populate connector->modes before picking the splash mode */
	if (connector->funcs->fill_modes) {
		connector->funcs->fill_modes(connector,
				dev->mode_config.max_width,
				dev->mode_config.max_height);
	} else {
		SDE_ERROR("fill_modes api not defined\n");
		mutex_unlock(&dev->mode_config.mutex);
		return -EINVAL;
	}
	mutex_unlock(&dev->mode_config.mutex);

	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));

	/* currently consider modes[0] as the preferred mode */
	drm_mode = list_first_entry(&connector->modes,
			struct drm_display_mode, head);
	SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
			drm_mode->name, drm_mode->base.id,
			drm_mode->type, drm_mode->flags);

	/* Update CRTC drm structure */
	crtc->state->active = true;
	rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
	if (rc) {
		SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
		return rc;
	}
	drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
	drm_mode_copy(&crtc->mode, drm_mode);

	/* Update encoder structure */
	sde_encoder_update_caps_for_cont_splash(encoder);

	sde_crtc_update_cont_splash_mixer_settings(crtc);

	/* let the connector's display finish its splash handover */
	sde_conn = to_sde_connector(connector);
	if (sde_conn && sde_conn->ops.cont_splash_config)
		sde_conn->ops.cont_splash_config(sde_conn->display);

	return rc;
}
2730
Kalyan Thota2f0444a2018-04-20 17:50:33 +05302731static bool sde_kms_check_for_splash(struct msm_kms *kms)
2732{
2733 struct sde_kms *sde_kms;
2734
2735 if (!kms) {
2736 SDE_ERROR("invalid kms\n");
2737 return false;
2738 }
2739
2740 sde_kms = to_sde_kms(kms);
2741 return sde_kms->splash_data.cont_splash_en;
2742}
2743
Clarence Ipd86f6e42017-08-08 18:31:00 -04002744static int sde_kms_pm_suspend(struct device *dev)
2745{
2746 struct drm_device *ddev;
2747 struct drm_modeset_acquire_ctx ctx;
2748 struct drm_connector *conn;
2749 struct drm_atomic_state *state;
2750 struct sde_kms *sde_kms;
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002751 int ret = 0, num_crtcs = 0;
Clarence Ipd86f6e42017-08-08 18:31:00 -04002752
2753 if (!dev)
2754 return -EINVAL;
2755
2756 ddev = dev_get_drvdata(dev);
2757 if (!ddev || !ddev_to_msm_kms(ddev))
2758 return -EINVAL;
2759
2760 sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
2761 SDE_EVT32(0);
2762
2763 /* disable hot-plug polling */
2764 drm_kms_helper_poll_disable(ddev);
2765
2766 /* acquire modeset lock(s) */
2767 drm_modeset_acquire_init(&ctx, 0);
2768
2769retry:
2770 ret = drm_modeset_lock_all_ctx(ddev, &ctx);
2771 if (ret)
2772 goto unlock;
2773
2774 /* save current state for resume */
2775 if (sde_kms->suspend_state)
2776 drm_atomic_state_free(sde_kms->suspend_state);
2777 sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
2778 if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
2779 DRM_ERROR("failed to back up suspend state\n");
2780 sde_kms->suspend_state = NULL;
2781 goto unlock;
2782 }
2783
2784 /* create atomic state to disable all CRTCs */
2785 state = drm_atomic_state_alloc(ddev);
2786 if (IS_ERR_OR_NULL(state)) {
2787 DRM_ERROR("failed to allocate crtc disable state\n");
2788 goto unlock;
2789 }
2790
2791 state->acquire_ctx = &ctx;
2792 drm_for_each_connector(conn, ddev) {
2793 struct drm_crtc_state *crtc_state;
2794 uint64_t lp;
2795
2796 if (!conn->state || !conn->state->crtc ||
2797 conn->dpms != DRM_MODE_DPMS_ON)
2798 continue;
2799
2800 lp = sde_connector_get_lp(conn);
2801 if (lp == SDE_MODE_DPMS_LP1) {
2802 /* transition LP1->LP2 on pm suspend */
2803 ret = sde_connector_set_property_for_commit(conn, state,
2804 CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
2805 if (ret) {
2806 DRM_ERROR("failed to set lp2 for conn %d\n",
2807 conn->base.id);
2808 drm_atomic_state_free(state);
2809 goto unlock;
2810 }
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002811 }
2812
2813 if (lp != SDE_MODE_DPMS_LP2) {
Clarence Ipd86f6e42017-08-08 18:31:00 -04002814 /* force CRTC to be inactive */
2815 crtc_state = drm_atomic_get_crtc_state(state,
2816 conn->state->crtc);
2817 if (IS_ERR_OR_NULL(crtc_state)) {
2818 DRM_ERROR("failed to get crtc %d state\n",
2819 conn->state->crtc->base.id);
2820 drm_atomic_state_free(state);
2821 goto unlock;
2822 }
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002823
2824 if (lp != SDE_MODE_DPMS_LP1)
2825 crtc_state->active = false;
2826 ++num_crtcs;
Clarence Ipd86f6e42017-08-08 18:31:00 -04002827 }
2828 }
2829
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002830 /* check for nothing to do */
2831 if (num_crtcs == 0) {
2832 DRM_DEBUG("all crtcs are already in the off state\n");
2833 drm_atomic_state_free(state);
Dhaval Patel8a7c3282017-12-05 00:41:58 -08002834 sde_kms->suspend_block = true;
2835 goto unlock;
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002836 }
2837
Clarence Ipd86f6e42017-08-08 18:31:00 -04002838 /* commit the "disable all" state */
2839 ret = drm_atomic_commit(state);
2840 if (ret < 0) {
2841 DRM_ERROR("failed to disable crtcs, %d\n", ret);
2842 drm_atomic_state_free(state);
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002843 goto unlock;
Clarence Ipd86f6e42017-08-08 18:31:00 -04002844 }
2845
Lloyd Atkinson84875dd2017-08-24 15:34:28 -04002846 sde_kms->suspend_block = true;
2847
Dhaval Patel8a7c3282017-12-05 00:41:58 -08002848 drm_for_each_connector(conn, ddev) {
2849 uint64_t lp;
2850
2851 lp = sde_connector_get_lp(conn);
2852 if (lp != SDE_MODE_DPMS_LP2)
2853 continue;
2854
2855 ret = sde_encoder_wait_for_event(conn->encoder,
2856 MSM_ENC_TX_COMPLETE);
2857 if (ret && ret != -EWOULDBLOCK)
2858 SDE_ERROR(
2859 "[enc: %d] wait for commit done returned %d\n",
2860 conn->encoder->base.id, ret);
2861 else if (!ret)
2862 sde_encoder_idle_request(conn->encoder);
2863 }
Clarence Ipd86f6e42017-08-08 18:31:00 -04002864unlock:
2865 if (ret == -EDEADLK) {
2866 drm_modeset_backoff(&ctx);
2867 goto retry;
2868 }
2869 drm_modeset_drop_locks(&ctx);
2870 drm_modeset_acquire_fini(&ctx);
2871
2872 return 0;
2873}
2874
2875static int sde_kms_pm_resume(struct device *dev)
2876{
2877 struct drm_device *ddev;
2878 struct sde_kms *sde_kms;
2879 int ret;
2880
2881 if (!dev)
2882 return -EINVAL;
2883
2884 ddev = dev_get_drvdata(dev);
2885 if (!ddev || !ddev_to_msm_kms(ddev))
2886 return -EINVAL;
2887
2888 sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
2889
2890 SDE_EVT32(sde_kms->suspend_state != NULL);
2891
2892 drm_mode_config_reset(ddev);
2893
2894 drm_modeset_lock_all(ddev);
2895
2896 sde_kms->suspend_block = false;
2897
2898 if (sde_kms->suspend_state) {
2899 sde_kms->suspend_state->acquire_ctx =
2900 ddev->mode_config.acquire_ctx;
2901 ret = drm_atomic_commit(sde_kms->suspend_state);
2902 if (ret < 0) {
2903 DRM_ERROR("failed to restore state, %d\n", ret);
2904 drm_atomic_state_free(sde_kms->suspend_state);
2905 }
2906 sde_kms->suspend_state = NULL;
2907 }
2908 drm_modeset_unlock_all(ddev);
2909
2910 /* enable hot-plug polling */
2911 drm_kms_helper_poll_enable(ddev);
2912
2913 return 0;
2914}
2915
Ben Chan78647cd2016-06-26 22:02:47 -04002916static const struct msm_kms_funcs kms_funcs = {
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002917 .hw_init = sde_kms_hw_init,
Alan Kwong5a3ac752016-10-16 01:02:35 -04002918 .postinit = sde_kms_postinit,
Ben Chan78647cd2016-06-26 22:02:47 -04002919 .irq_preinstall = sde_irq_preinstall,
2920 .irq_postinstall = sde_irq_postinstall,
2921 .irq_uninstall = sde_irq_uninstall,
2922 .irq = sde_irq,
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002923 .preclose = sde_kms_preclose,
Lloyd Atkinsone08229c2017-10-02 17:53:30 -04002924 .lastclose = sde_kms_lastclose,
Clarence Ip24f80662016-06-13 19:05:32 -04002925 .prepare_fence = sde_kms_prepare_fence,
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002926 .prepare_commit = sde_kms_prepare_commit,
2927 .commit = sde_kms_commit,
2928 .complete_commit = sde_kms_complete_commit,
2929 .wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07002930 .wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
Alan Kwongf5dd86c2016-08-09 18:08:17 -04002931 .enable_vblank = sde_kms_enable_vblank,
2932 .disable_vblank = sde_kms_disable_vblank,
Lloyd Atkinsonfa2489c2016-05-25 15:16:03 -04002933 .check_modified_format = sde_format_check_modified_format,
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002934 .atomic_check = sde_kms_atomic_check,
Clarence Ip4ce59322016-06-26 22:27:51 -04002935 .get_format = sde_get_msm_format,
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002936 .round_pixclk = sde_kms_round_pixclk,
Clarence Ipd86f6e42017-08-08 18:31:00 -04002937 .pm_suspend = sde_kms_pm_suspend,
2938 .pm_resume = sde_kms_pm_resume,
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002939 .destroy = sde_kms_destroy,
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002940 .cont_splash_config = sde_kms_cont_splash_config,
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002941 .register_events = _sde_kms_register_events,
Jordan Croused8e96522017-02-13 10:14:16 -07002942 .get_address_space = _sde_kms_get_address_space,
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002943 .postopen = _sde_kms_post_open,
Kalyan Thota2f0444a2018-04-20 17:50:33 +05302944 .check_for_splash = sde_kms_check_for_splash,
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002945};
2946
Dhaval Patel3949f032016-06-20 16:24:33 -07002947/* the caller api needs to turn on clock before calling it */
Clarence Ip17162b52016-11-24 17:06:29 -05002948static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002949{
Dhaval Patel88739332017-04-11 11:08:04 -07002950 sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002951}
2952
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002953static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
2954{
2955 struct msm_mmu *mmu;
2956 int i;
2957
Jordan Croused8e96522017-02-13 10:14:16 -07002958 for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
2959 if (!sde_kms->aspace[i])
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002960 continue;
2961
Jordan Croused8e96522017-02-13 10:14:16 -07002962 mmu = sde_kms->aspace[i]->mmu;
2963
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002964 mmu->funcs->detach(mmu, (const char **)iommu_ports,
2965 ARRAY_SIZE(iommu_ports));
Jordan Crouse12bf3622017-02-13 10:14:11 -07002966 msm_gem_address_space_destroy(sde_kms->aspace[i]);
2967
Jordan Croused8e96522017-02-13 10:14:16 -07002968 sde_kms->aspace[i] = NULL;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002969 }
2970
2971 return 0;
2972}
2973
2974static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002975{
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002976 struct msm_mmu *mmu;
2977 int i, ret;
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08002978 int early_map = 1;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002979
Alan Kwong112a84f2016-05-24 20:49:21 -04002980 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
Jordan Crouse12bf3622017-02-13 10:14:11 -07002981 struct msm_gem_address_space *aspace;
2982
Alan Kwong112a84f2016-05-24 20:49:21 -04002983 mmu = msm_smmu_new(sde_kms->dev->dev, i);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002984 if (IS_ERR(mmu)) {
2985 ret = PTR_ERR(mmu);
Dhaval Patel5473cd22017-03-19 21:38:08 -07002986 SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
2987 i, ret);
Dhaval Patel5200c602017-01-17 15:53:37 -08002988 continue;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002989 }
2990
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08002991 /*
2992 * Before attaching SMMU, we need to honor continuous splash
2993 * use case where hardware tries to fetch buffer from physical
2994 * address. To facilitate this requirement we need to have a
2995 * one to one mapping on SMMU until we have our first frame.
2996 */
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08002997 if (i == MSM_SMMU_DOMAIN_UNSECURE) {
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08002998 ret = mmu->funcs->set_attribute(mmu,
2999 DOMAIN_ATTR_EARLY_MAP,
3000 &early_map);
3001 if (ret) {
3002 SDE_ERROR("failed to set map att: %d\n", ret);
3003 goto fail;
3004 }
3005 }
3006
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07003007 aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
Jordan Crouse12bf3622017-02-13 10:14:11 -07003008 mmu, "sde");
3009 if (IS_ERR(aspace)) {
3010 ret = PTR_ERR(aspace);
3011 mmu->funcs->destroy(mmu);
3012 goto fail;
3013 }
3014
3015 sde_kms->aspace[i] = aspace;
3016
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003017 ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
3018 ARRAY_SIZE(iommu_ports));
3019 if (ret) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003020 SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
Jordan Crouse12bf3622017-02-13 10:14:11 -07003021 msm_gem_address_space_destroy(aspace);
3022 goto fail;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003023 }
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07003024 aspace->domain_attached = true;
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003025 early_map = 0;
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08003026
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003027 /* Mapping splash memory block */
3028 if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08003029 sde_kms->splash_data.splash_base) {
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003030 ret = _sde_kms_splash_smmu_map(sde_kms->dev, mmu,
3031 &sde_kms->splash_data);
3032 if (ret) {
3033 SDE_ERROR("failed to map ret:%d\n", ret);
3034 goto fail;
3035 }
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08003036 }
3037
3038 /*
3039 * Turning off early map after generating one to one
3040 * mapping for splash address space.
3041 */
3042 ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
3043 &early_map);
3044 if (ret) {
3045 SDE_ERROR("failed to set map att ret:%d\n", ret);
3046 goto early_map_fail;
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003047 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003048 }
3049
3050 return 0;
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003051early_map_fail:
3052 mmu->funcs->one_to_one_unmap(mmu, sde_kms->splash_data.splash_base,
3053 sde_kms->splash_data.splash_size);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003054fail:
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003055 mmu->funcs->destroy(mmu);
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003056 _sde_kms_mmu_destroy(sde_kms);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003057
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003058 return ret;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003059}
3060
Clarence Ip7f0de632017-05-31 14:59:14 -04003061static void sde_kms_handle_power_event(u32 event_type, void *usr)
3062{
3063 struct sde_kms *sde_kms = usr;
Harsh Sahu08a4a742017-09-18 11:42:39 -07003064 struct msm_kms *msm_kms;
Clarence Ip7f0de632017-05-31 14:59:14 -04003065
Harsh Sahu08a4a742017-09-18 11:42:39 -07003066 msm_kms = &sde_kms->base;
Clarence Ip7f0de632017-05-31 14:59:14 -04003067 if (!sde_kms)
3068 return;
3069
Harsh Sahu08a4a742017-09-18 11:42:39 -07003070 SDE_DEBUG("event_type:%d\n", event_type);
3071 SDE_EVT32_VERBOSE(event_type);
3072
3073 if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
3074 sde_irq_update(msm_kms, true);
Clarence Ip7f0de632017-05-31 14:59:14 -04003075 sde_vbif_init_memtypes(sde_kms);
Harsh Sahu08a4a742017-09-18 11:42:39 -07003076 } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
3077 sde_irq_update(msm_kms, false);
3078 }
Clarence Ip7f0de632017-05-31 14:59:14 -04003079}
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003080
Alan Kwong23afc2d92017-09-15 10:59:06 -04003081#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)
3082
3083static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
3084{
3085 struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
3086 struct drm_device *dev;
3087 struct msm_drm_private *priv;
3088 int rc;
3089
3090 SDE_DEBUG("\n");
3091
3092 dev = sde_kms->dev;
3093 if (!dev)
3094 return -EINVAL;
3095
3096 priv = dev->dev_private;
3097 if (!priv)
3098 return -EINVAL;
3099
3100 SDE_EVT32(genpd->device_count);
3101
3102 rc = sde_power_resource_enable(&priv->phandle, priv->pclient, true);
3103
3104 return rc;
3105}
3106
3107static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
3108{
3109 struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
3110 struct drm_device *dev;
3111 struct msm_drm_private *priv;
3112 int rc;
3113
3114 SDE_DEBUG("\n");
3115
3116 dev = sde_kms->dev;
3117 if (!dev)
3118 return -EINVAL;
3119
3120 priv = dev->dev_private;
3121 if (!priv)
3122 return -EINVAL;
3123
3124 SDE_EVT32(genpd->device_count);
3125
3126 rc = sde_power_resource_enable(&priv->phandle, priv->pclient, false);
3127
3128 return rc;
3129}
3130
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003131static int _sde_kms_get_splash_data(struct sde_splash_data *data)
3132{
3133 int ret = 0;
3134 struct device_node *parent, *node;
3135 struct resource r;
3136
3137 if (!data)
3138 return -EINVAL;
3139
3140 parent = of_find_node_by_path("/reserved-memory");
3141 if (!parent) {
3142 SDE_ERROR("failed to find reserved-memory node\n");
3143 return -EINVAL;
3144 }
3145
3146 node = of_find_node_by_name(parent, "cont_splash_region");
3147 if (!node) {
3148 SDE_ERROR("failed to find splash memory reservation\n");
3149 return -EINVAL;
3150 }
3151
3152 if (of_address_to_resource(node, 0, &r)) {
3153 SDE_ERROR("failed to find data for splash memory\n");
3154 return -EINVAL;
3155 }
3156
3157 data->splash_base = (unsigned long)r.start;
3158 data->splash_size = (r.end - r.start) + 1;
3159
3160 pr_info("found continuous splash base address:%lx size:%x\n",
3161 data->splash_base,
3162 data->splash_size);
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003163 return ret;
3164}
3165
Clarence Ip17162b52016-11-24 17:06:29 -05003166static int sde_kms_hw_init(struct msm_kms *kms)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003167{
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003168 struct sde_kms *sde_kms;
Clarence Ip17162b52016-11-24 17:06:29 -05003169 struct drm_device *dev;
Dhaval Patel3949f032016-06-20 16:24:33 -07003170 struct msm_drm_private *priv;
Chandan Uddaraju9efbbe32017-11-09 23:57:05 -08003171 struct sde_rm *rm = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05003172 int i, rc = -EINVAL;
Dhaval Patel3949f032016-06-20 16:24:33 -07003173
Clarence Ip17162b52016-11-24 17:06:29 -05003174 if (!kms) {
3175 SDE_ERROR("invalid kms\n");
3176 goto end;
3177 }
3178
3179 sde_kms = to_sde_kms(kms);
3180 dev = sde_kms->dev;
3181 if (!dev || !dev->platformdev) {
3182 SDE_ERROR("invalid device\n");
Dhaval Patel3949f032016-06-20 16:24:33 -07003183 goto end;
3184 }
3185
3186 priv = dev->dev_private;
Clarence Ip17162b52016-11-24 17:06:29 -05003187 if (!priv) {
3188 SDE_ERROR("invalid private data\n");
Dhaval Patel3949f032016-06-20 16:24:33 -07003189 goto end;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003190 }
3191
Dhaval Patela2430842017-06-15 14:32:36 -07003192 sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "mdp_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05003193 if (IS_ERR(sde_kms->mmio)) {
3194 rc = PTR_ERR(sde_kms->mmio);
3195 SDE_ERROR("mdp register memory map failed: %d\n", rc);
3196 sde_kms->mmio = NULL;
3197 goto error;
3198 }
Lakshmi Narayana Kalavala89b6cbe2018-05-11 11:28:12 -07003199 DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
Dhaval Patela2430842017-06-15 14:32:36 -07003200 sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05003201
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003202 rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
3203 sde_kms->mmio_len);
3204 if (rc)
3205 SDE_ERROR("dbg base register kms failed: %d\n", rc);
3206
Dhaval Patela2430842017-06-15 14:32:36 -07003207 sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, "vbif_phys",
3208 "vbif_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05003209 if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
3210 rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
3211 SDE_ERROR("vbif register memory map failed: %d\n", rc);
3212 sde_kms->vbif[VBIF_RT] = NULL;
3213 goto error;
3214 }
Dhaval Patela2430842017-06-15 14:32:36 -07003215 sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(dev->platformdev,
3216 "vbif_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003217 rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
3218 sde_kms->vbif_len[VBIF_RT]);
3219 if (rc)
3220 SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
3221
Dhaval Patela2430842017-06-15 14:32:36 -07003222 sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, "vbif_nrt_phys",
3223 "vbif_nrt_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05003224 if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
3225 sde_kms->vbif[VBIF_NRT] = NULL;
3226 SDE_DEBUG("VBIF NRT is not defined");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003227 } else {
Dhaval Patela2430842017-06-15 14:32:36 -07003228 sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(dev->platformdev,
3229 "vbif_nrt_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003230 rc = sde_dbg_reg_register_base("vbif_nrt",
3231 sde_kms->vbif[VBIF_NRT],
3232 sde_kms->vbif_len[VBIF_NRT]);
3233 if (rc)
3234 SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
3235 rc);
Clarence Ip17162b52016-11-24 17:06:29 -05003236 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003237
Dhaval Patela2430842017-06-15 14:32:36 -07003238 sde_kms->reg_dma = msm_ioremap(dev->platformdev, "regdma_phys",
3239 "regdma_phys");
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08003240 if (IS_ERR(sde_kms->reg_dma)) {
3241 sde_kms->reg_dma = NULL;
3242 SDE_DEBUG("REG_DMA is not defined");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003243 } else {
Dhaval Patela2430842017-06-15 14:32:36 -07003244 sde_kms->reg_dma_len = msm_iomap_size(dev->platformdev,
3245 "regdma_phys");
Gopikrishnaiah Anandanbc5aa792017-08-23 18:30:08 -07003246 rc = sde_dbg_reg_register_base("reg_dma",
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003247 sde_kms->reg_dma,
3248 sde_kms->reg_dma_len);
3249 if (rc)
3250 SDE_ERROR("dbg base register reg_dma failed: %d\n",
3251 rc);
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08003252 }
3253
Dhaval Patel3949f032016-06-20 16:24:33 -07003254 sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
3255 if (IS_ERR_OR_NULL(sde_kms->core_client)) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003256 rc = PTR_ERR(sde_kms->core_client);
Dhaval Patel5398f602017-03-25 18:25:18 -07003257 if (!sde_kms->core_client)
3258 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003259 SDE_ERROR("sde power client create failed: %d\n", rc);
3260 sde_kms->core_client = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05003261 goto error;
Dhaval Patel3949f032016-06-20 16:24:33 -07003262 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003263
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003264 rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003265 if (rc)
Vara Reddyc90c7fe2017-11-10 17:02:02 -08003266 SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003267
Dhaval Patel3949f032016-06-20 16:24:33 -07003268 rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
3269 true);
3270 if (rc) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003271 SDE_ERROR("resource enable failed: %d\n", rc);
Clarence Ip17162b52016-11-24 17:06:29 -05003272 goto error;
Dhaval Patel3949f032016-06-20 16:24:33 -07003273 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003274
Dhaval Patelb0a25be2017-12-11 22:38:26 -08003275 for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
3276 sde_power_data_bus_set_quota(&priv->phandle,
3277 sde_kms->core_client,
3278 SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
3279 SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA,
3280 SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA);
3281
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003282 _sde_kms_core_hw_rev_init(sde_kms);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04003283
Dhaval Patelb271b842016-10-19 21:41:22 -07003284 pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
3285
Dhaval Patel8bf7ff32016-07-20 18:13:24 -07003286 sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
Dhaval Patel3949f032016-06-20 16:24:33 -07003287 if (IS_ERR_OR_NULL(sde_kms->catalog)) {
Dhaval Patel3949f032016-06-20 16:24:33 -07003288 rc = PTR_ERR(sde_kms->catalog);
Dhaval Patel5398f602017-03-25 18:25:18 -07003289 if (!sde_kms->catalog)
3290 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003291 SDE_ERROR("catalog init failed: %d\n", rc);
3292 sde_kms->catalog = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05003293 goto power_error;
Dhaval Patel3949f032016-06-20 16:24:33 -07003294 }
3295
Lloyd Atkinson274cc462017-02-21 11:52:06 -05003296 sde_dbg_init_dbg_buses(sde_kms->core_rev);
3297
Chandan Uddaraju9efbbe32017-11-09 23:57:05 -08003298 rm = &sde_kms->rm;
3299 rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
3300 sde_kms->dev);
3301 if (rc) {
3302 SDE_ERROR("rm init failed: %d\n", rc);
3303 goto power_error;
3304 }
3305
3306 sde_kms->rm_init = true;
3307
Chandan Uddarajufa184062017-11-28 17:26:31 -08003308 sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
3309 if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
3310 rc = PTR_ERR(sde_kms->hw_intr);
3311 SDE_ERROR("hw_intr init failed: %d\n", rc);
3312 sde_kms->hw_intr = NULL;
3313 goto hw_intr_init_err;
3314 }
3315
Vara Reddyc90c7fe2017-11-10 17:02:02 -08003316 /*
3317 * Attempt continuous splash handoff only if reserved
3318 * splash memory is found.
3319 */
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003320 if (sde_kms->splash_data.splash_base)
Chandan Uddarajufa184062017-11-28 17:26:31 -08003321 sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
Chandan Uddaraju9efbbe32017-11-09 23:57:05 -08003322 &sde_kms->splash_data,
3323 sde_kms->catalog);
Gopikrishnaiah Anandane69dc592017-03-29 14:00:55 -07003324
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08003325 sde_kms->splash_data.resource_handoff_pending = true;
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08003326
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08003327 /* Initialize reg dma block which is a singleton */
3328 rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
3329 sde_kms->dev);
3330 if (rc) {
3331 SDE_ERROR("failed: reg dma init failed\n");
3332 goto power_error;
3333 }
3334
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003335 rc = _sde_kms_mmu_init(sde_kms);
3336 if (rc) {
3337 SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
3338 goto power_error;
3339 }
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003340 sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
3341 if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
3342 rc = PTR_ERR(sde_kms->hw_mdp);
Dhaval Patel5398f602017-03-25 18:25:18 -07003343 if (!sde_kms->hw_mdp)
3344 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003345 SDE_ERROR("failed to get hw_mdp: %d\n", rc);
3346 sde_kms->hw_mdp = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05003347 goto power_error;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003348 }
Dhaval Patel3949f032016-06-20 16:24:33 -07003349
Alan Kwong5d324e42016-07-28 22:56:18 -04003350 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
3351 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
3352
3353 sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
3354 sde_kms->vbif[vbif_idx], sde_kms->catalog);
3355 if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003356 rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
Dhaval Patel5398f602017-03-25 18:25:18 -07003357 if (!sde_kms->hw_vbif[vbif_idx])
3358 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003359 SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
Alan Kwong5d324e42016-07-28 22:56:18 -04003360 sde_kms->hw_vbif[vbif_idx] = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05003361 goto power_error;
Alan Kwong5d324e42016-07-28 22:56:18 -04003362 }
3363 }
3364
Alan Kwong54125bb2017-02-26 16:01:36 -08003365 sde_kms->iclient = msm_ion_client_create(dev->unique);
3366 if (IS_ERR(sde_kms->iclient)) {
3367 rc = PTR_ERR(sde_kms->iclient);
3368 SDE_DEBUG("msm_ion_client not available: %d\n", rc);
3369 sde_kms->iclient = NULL;
3370 }
3371
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003372
Alan Kwong67a3f792016-11-01 23:16:53 -04003373 rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
Dhaval Patel446446e2017-04-21 19:38:17 -07003374 &priv->phandle, priv->pclient, "core_clk");
Alan Kwong67a3f792016-11-01 23:16:53 -04003375 if (rc) {
3376 SDE_ERROR("failed to init perf %d\n", rc);
3377 goto perf_err;
3378 }
3379
Clarence Ip4ce59322016-06-26 22:27:51 -04003380 /*
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003381 * _sde_kms_drm_obj_init should create the DRM related objects
3382 * i.e. CRTCs, planes, encoders, connectors and so forth
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003383 */
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003384 rc = _sde_kms_drm_obj_init(sde_kms);
3385 if (rc) {
3386 SDE_ERROR("modeset init failed: %d\n", rc);
Alan Kwong67a3f792016-11-01 23:16:53 -04003387 goto drm_obj_init_err;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003388 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003389
Dhaval Patel79797b12018-02-13 19:58:05 -08003390 dev->mode_config.min_width = sde_kms->catalog->min_display_width;
3391 dev->mode_config.min_height = sde_kms->catalog->min_display_height;
3392 dev->mode_config.max_width = sde_kms->catalog->max_display_width;
3393 dev->mode_config.max_height = sde_kms->catalog->max_display_height;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04003394
Dhaval Patel6c9cb2b2017-12-14 23:08:32 -08003395 mutex_init(&sde_kms->secure_transition_lock);
3396 atomic_set(&sde_kms->detach_sec_cb, 0);
3397 atomic_set(&sde_kms->detach_all_cb, 0);
3398
Lloyd Atkinsonfa2489c2016-05-25 15:16:03 -04003399 /*
3400 * Support format modifiers for compression etc.
3401 */
3402 dev->mode_config.allow_fb_modifiers = true;
3403
Clarence Ip7f0de632017-05-31 14:59:14 -04003404 /*
3405 * Handle (re)initializations during power enable
3406 */
3407 sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
3408 sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
Harsh Sahu08a4a742017-09-18 11:42:39 -07003409 SDE_POWER_EVENT_POST_ENABLE |
3410 SDE_POWER_EVENT_PRE_DISABLE,
Clarence Ip7f0de632017-05-31 14:59:14 -04003411 sde_kms_handle_power_event, sde_kms, "kms");
3412
Alan Kwong23afc2d92017-09-15 10:59:06 -04003413 /* initialize power domain if defined */
3414 if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
3415 sde_kms->genpd.name = dev->unique;
3416 sde_kms->genpd.power_off = sde_kms_pd_disable;
3417 sde_kms->genpd.power_on = sde_kms_pd_enable;
3418
3419 rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
3420 if (rc < 0) {
3421 SDE_ERROR("failed to init genpd provider %s: %d\n",
3422 sde_kms->genpd.name, rc);
3423 goto genpd_err;
3424 }
3425
3426 rc = of_genpd_add_provider_simple(dev->dev->of_node,
3427 &sde_kms->genpd);
3428 if (rc < 0) {
3429 SDE_ERROR("failed to add genpd provider %s: %d\n",
3430 sde_kms->genpd.name, rc);
3431 pm_genpd_remove(&sde_kms->genpd);
3432 goto genpd_err;
3433 }
3434
3435 sde_kms->genpd_init = true;
3436 SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
3437 }
3438
Raviteja Tamatam30fa3e32018-01-04 21:00:22 +05303439 if (sde_kms->splash_data.cont_splash_en) {
Chandan Uddaraju18f09402017-09-29 11:54:29 -07003440 SDE_DEBUG("Skipping MDP Resources disable\n");
Raviteja Tamatam30fa3e32018-01-04 21:00:22 +05303441 } else {
3442 for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
3443 sde_power_data_bus_set_quota(&priv->phandle,
3444 sde_kms->core_client,
3445 SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
3446 SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
3447 SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
3448
Chandan Uddaraju18f09402017-09-29 11:54:29 -07003449 sde_power_resource_enable(&priv->phandle,
3450 sde_kms->core_client, false);
Raviteja Tamatam30fa3e32018-01-04 21:00:22 +05303451 }
Clarence Ip17162b52016-11-24 17:06:29 -05003452 return 0;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003453
Alan Kwong23afc2d92017-09-15 10:59:06 -04003454genpd_err:
Alan Kwong67a3f792016-11-01 23:16:53 -04003455drm_obj_init_err:
3456 sde_core_perf_destroy(&sde_kms->perf);
Abhinav Kumar2316fb92017-01-30 23:07:08 -08003457hw_intr_init_err:
Alan Kwong67a3f792016-11-01 23:16:53 -04003458perf_err:
Clarence Ip17162b52016-11-24 17:06:29 -05003459power_error:
Dhaval Patel3949f032016-06-20 16:24:33 -07003460 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
Clarence Ip17162b52016-11-24 17:06:29 -05003461error:
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04003462 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
Dhaval Patel3949f032016-06-20 16:24:33 -07003463end:
Clarence Ip17162b52016-11-24 17:06:29 -05003464 return rc;
3465}
3466
3467struct msm_kms *sde_kms_init(struct drm_device *dev)
3468{
3469 struct msm_drm_private *priv;
3470 struct sde_kms *sde_kms;
3471
3472 if (!dev || !dev->dev_private) {
3473 SDE_ERROR("drm device node invalid\n");
3474 return ERR_PTR(-EINVAL);
3475 }
3476
3477 priv = dev->dev_private;
3478
3479 sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
3480 if (!sde_kms) {
3481 SDE_ERROR("failed to allocate sde kms\n");
3482 return ERR_PTR(-ENOMEM);
3483 }
3484
3485 msm_kms_init(&sde_kms->base, &kms_funcs);
3486 sde_kms->dev = dev;
3487
3488 return &sde_kms->base;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003489}
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07003490
3491static int _sde_kms_register_events(struct msm_kms *kms,
3492 struct drm_mode_object *obj, u32 event, bool en)
3493{
3494 int ret = 0;
3495 struct drm_crtc *crtc = NULL;
3496 struct drm_connector *conn = NULL;
3497 struct sde_kms *sde_kms = NULL;
3498
3499 if (!kms || !obj) {
3500 SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
3501 return -EINVAL;
3502 }
3503
3504 sde_kms = to_sde_kms(kms);
3505 switch (obj->type) {
3506 case DRM_MODE_OBJECT_CRTC:
3507 crtc = obj_to_crtc(obj);
3508 ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
3509 break;
3510 case DRM_MODE_OBJECT_CONNECTOR:
3511 conn = obj_to_connector(obj);
3512 ret = sde_connector_register_custom_event(sde_kms, conn, event,
3513 en);
3514 break;
3515 }
3516
3517 return ret;
3518}
Sandeep Panda11b20d82017-06-19 12:57:27 +05303519
/*
 * sde_kms_handle_recovery - trigger recovery handling for an encoder by
 * logging an event and waiting for the encoder's ACTIVE_REGION event.
 * Returns the result of sde_encoder_wait_for_event().
 */
int sde_kms_handle_recovery(struct drm_encoder *encoder)
{
	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
}