blob: df4eaecf779be6a0d47d516655f3d75444807720 [file] [log] [blame]
Dhaval Patel14d46ce2017-01-17 16:28:12 -08001/*
Narender Ankam1afbd172020-03-16 17:27:44 +05302 * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
Dhaval Patel14d46ce2017-01-17 16:28:12 -08003 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07005 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -08006 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07009 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -080010 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070017 */
18
Alan Kwong5d324e42016-07-28 22:56:18 -040019#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
20
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070021#include <drm/drm_crtc.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040022#include <linux/debugfs.h>
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070023#include <linux/of_address.h>
Dhaval Patel04c7e8e2016-09-26 20:14:31 -070024#include <linux/of_irq.h>
Alan Kwong4dd64c82017-02-04 18:41:51 -080025#include <linux/dma-buf.h>
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070026#include <linux/memblock.h>
27#include <linux/bootmem.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040028
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070029#include "msm_drv.h"
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -040030#include "msm_mmu.h"
Clarence Ipd02440b2017-05-21 18:10:01 -040031#include "msm_gem.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040032
33#include "dsi_display.h"
34#include "dsi_drm.h"
35#include "sde_wb.h"
Padmanabhan Komanduru63758612017-05-23 01:47:18 -070036#include "dp_display.h"
37#include "dp_drm.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040038
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070039#include "sde_kms.h"
Alan Kwongf5dd86c2016-08-09 18:08:17 -040040#include "sde_core_irq.h"
Clarence Ip4ce59322016-06-26 22:27:51 -040041#include "sde_formats.h"
Alan Kwong5d324e42016-07-28 22:56:18 -040042#include "sde_hw_vbif.h"
Alan Kwong83285fb2016-10-21 20:51:17 -040043#include "sde_vbif.h"
44#include "sde_encoder.h"
45#include "sde_plane.h"
46#include "sde_crtc.h"
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -080047#include "sde_reg_dma.h"
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070048
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -080049#include <soc/qcom/scm.h>
50#include "soc/qcom/secure_buffer.h"
51
Alan Kwong1a00e4d2016-07-18 09:42:30 -040052#define CREATE_TRACE_POINTS
53#include "sde_trace.h"
54
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -080055/* defines for secure channel call */
56#define SEC_SID_CNT 2
57#define SEC_SID_MASK_0 0x80881
58#define SEC_SID_MASK_1 0x80C81
59#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
60#define MDP_DEVICE_ID 0x1A
61
/* iommu port name(s) used when attaching/detaching the mdp context banks */
static const char * const iommu_ports[] = {
		"mdp_0",
};
65
Clarence Ip4ce59322016-06-26 22:27:51 -040066/**
67 * Controls size of event log buffer. Specified as a power of 2.
68 */
69#define SDE_EVTLOG_SIZE 1024
70
71/*
72 * To enable overall DRM driver logging
73 * # echo 0x2 > /sys/module/drm/parameters/debug
74 *
75 * To enable DRM driver h/w logging
Dhaval Patel6c666622017-03-21 23:02:59 -070076 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
Clarence Ip4ce59322016-06-26 22:27:51 -040077 *
78 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
79 */
80#define SDE_DEBUGFS_DIR "msm_sde"
81#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
82
Prashant Singhaf73d452018-11-12 10:52:34 -080083#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
84#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20
85
Clarence Ipdd395242016-09-09 10:47:17 -040086/**
87 * sdecustom - enable certain driver customizations for sde clients
88 * Enabling this modifies the standard DRM behavior slightly and assumes
89 * that the clients have specific knowledge about the modifications that
90 * are involved, so don't enable this unless you know what you're doing.
91 *
92 * Parts of the driver that are affected by this setting may be located by
93 * searching for invocations of the 'sde_is_custom_client()' function.
94 *
95 * This is disabled by default.
96 */
/* read-only (0400) module parameter; see the block comment above for intent */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
100
Clarence Ip17162b52016-11-24 17:06:29 -0500101static int sde_kms_hw_init(struct msm_kms *kms);
102static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -0700103static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -0700104static int _sde_kms_register_events(struct msm_kms *kms,
105 struct drm_mode_object *obj, u32 event, bool en);
/* expose the 'sdecustom' module parameter to the rest of the driver */
bool sde_is_custom_client(void)
{
	return sdecustom;
}
110
Veera Sundaram Sankaran1fb97e72018-04-10 15:53:12 -0700111bool sde_kms_is_vbif_operation_allowed(struct sde_kms *sde_kms)
112{
113 struct drm_device *dev;
114 struct drm_crtc *crtc;
115 bool sui_enhancement = false;
116
117 if (!sde_kms || !sde_kms->dev)
118 return false;
119 dev = sde_kms->dev;
120
121 if (!sde_kms->catalog->sui_misr_supported)
122 return true;
123
124 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
125 if (!crtc->state || !crtc->state->active)
126 continue;
127
128 sui_enhancement |= sde_crtc_is_sui_enhancement_enabled(crtc);
129 }
130
131 if (!sui_enhancement)
132 return true;
133
134 return !sde_kms_is_secure_session_inprogress(sde_kms);
135}
136
Alan Kwongf0fd8512016-10-24 21:39:26 -0400137#ifdef CONFIG_DEBUG_FS
138static int _sde_danger_signal_status(struct seq_file *s,
139 bool danger_status)
140{
141 struct sde_kms *kms = (struct sde_kms *)s->private;
142 struct msm_drm_private *priv;
143 struct sde_danger_safe_status status;
144 int i;
Alan Kwong1124f1f2017-11-10 18:14:39 -0500145 int rc;
Alan Kwongf0fd8512016-10-24 21:39:26 -0400146
147 if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
148 SDE_ERROR("invalid arg(s)\n");
149 return 0;
150 }
151
152 priv = kms->dev->dev_private;
153 memset(&status, 0, sizeof(struct sde_danger_safe_status));
154
Alan Kwong1124f1f2017-11-10 18:14:39 -0500155 rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
156 if (rc) {
157 SDE_ERROR("failed to enable power resource %d\n", rc);
158 SDE_EVT32(rc, SDE_EVTLOG_ERROR);
159 return rc;
160 }
161
Alan Kwongf0fd8512016-10-24 21:39:26 -0400162 if (danger_status) {
163 seq_puts(s, "\nDanger signal status:\n");
164 if (kms->hw_mdp->ops.get_danger_status)
165 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
166 &status);
167 } else {
168 seq_puts(s, "\nSafe signal status:\n");
169 if (kms->hw_mdp->ops.get_danger_status)
170 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
171 &status);
172 }
173 sde_power_resource_enable(&priv->phandle, kms->core_client, false);
174
175 seq_printf(s, "MDP : 0x%x\n", status.mdp);
176
177 for (i = SSPP_VIG0; i < SSPP_MAX; i++)
178 seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
179 status.sspp[i]);
180 seq_puts(s, "\n");
181
182 for (i = WB_0; i < WB_MAX; i++)
183 seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
184 status.wb[i]);
185 seq_puts(s, "\n");
186
187 return 0;
188}
189
/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate a read-only seq_file wrapper
 * around an existing <prefix>_show() function: emits <prefix>_open()
 * (bound via single_open) and a matching <prefix>_fops structure.
 */
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}
202
/* seq_file show handler backing the "danger_status" debugfs node */
static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, true);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
208
/* seq_file show handler backing the "safe_status" debugfs node */
static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, false);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
214
215static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
216{
217 debugfs_remove_recursive(sde_kms->debugfs_danger);
218 sde_kms->debugfs_danger = NULL;
219}
220
221static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
222 struct dentry *parent)
223{
224 sde_kms->debugfs_danger = debugfs_create_dir("danger",
225 parent);
226 if (!sde_kms->debugfs_danger) {
227 SDE_ERROR("failed to create danger debugfs\n");
228 return -EINVAL;
229 }
230
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400231 debugfs_create_file("danger_status", 0600, sde_kms->debugfs_danger,
Alan Kwongf0fd8512016-10-24 21:39:26 -0400232 sde_kms, &sde_debugfs_danger_stats_fops);
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400233 debugfs_create_file("safe_status", 0600, sde_kms->debugfs_danger,
Alan Kwongf0fd8512016-10-24 21:39:26 -0400234 sde_kms, &sde_debugfs_safe_stats_fops);
235
236 return 0;
237}
238
/**
 * _sde_debugfs_show_regset32 - seq_file show handler that hex-dumps a
 *	register range described by a sde_debugfs_regset32
 * @s: seq file; ->private carries the regset descriptor
 * @data: unused
 *
 * Output is formatted 16 bytes (four 32-bit words) per line, each line
 * prefixed with its register offset. Clocks are voted on around the
 * readl_relaxed() accesses.
 *
 * Return: 0 always (errors are reported inline in the seq output)
 */
static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct sde_debugfs_regset32 *regset;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	sde_kms = regset->sde_kms;
	if (!sde_kms || !sde_kms->mmio)
		return 0;

	dev = sde_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = sde_kms->mmio + regset->offset;

	/* insert padding spaces, if needed, so columns line up when the
	 * range does not start on a 16-byte boundary
	 */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, " ");
	}

	if (sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, true)) {
		seq_puts(s, "failed to enable sde clocks\n");
		return 0;
	}

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	return 0;
}
292
/* seq_file open hook: bind the regset descriptor to the show callback */
static int sde_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
}

/* file operations shared by all register-range dump debugfs nodes */
static const struct file_operations sde_fops_regset32 = {
	.open = sde_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
305
306void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
Clarence Ipaac9f332016-08-31 15:46:35 -0400307 uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
Clarence Ip4ce59322016-06-26 22:27:51 -0400308{
309 if (regset) {
310 regset->offset = offset;
311 regset->blk_len = length;
Clarence Ipaac9f332016-08-31 15:46:35 -0400312 regset->sde_kms = sde_kms;
Clarence Ip4ce59322016-06-26 22:27:51 -0400313 }
314}
315
316void *sde_debugfs_create_regset32(const char *name, umode_t mode,
317 void *parent, struct sde_debugfs_regset32 *regset)
318{
Clarence Ipaac9f332016-08-31 15:46:35 -0400319 if (!name || !regset || !regset->sde_kms || !regset->blk_len)
Clarence Ip4ce59322016-06-26 22:27:51 -0400320 return NULL;
321
322 /* make sure offset is a multiple of 4 */
323 regset->offset = round_down(regset->offset, 4);
324
325 return debugfs_create_file(name, mode, parent,
326 regset, &sde_fops_regset32);
327}
328
329void *sde_debugfs_get_root(struct sde_kms *sde_kms)
330{
Dhaval Patel6c666622017-03-21 23:02:59 -0700331 struct msm_drm_private *priv;
332
333 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
334 return NULL;
335
336 priv = sde_kms->dev->dev_private;
337 return priv->debug_root;
Clarence Ip4ce59322016-06-26 22:27:51 -0400338}
339
/**
 * _sde_debugfs_init - create all sde debugfs nodes under the drm debug root
 * @sde_kms: pointer to sde_kms structure
 *
 * Return: 0 on success, negative error code on failure
 */
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* debugfs_root is guaranteed non-NULL here; node creation failures
	 * below are deliberately ignored as non-fatal
	 */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	return 0;
}
370
/* remove every debugfs node created by _sde_debugfs_init */
static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
	if (!sde_kms)
		return;

	sde_debugfs_vbif_destroy(sde_kms);
	sde_debugfs_danger_destroy(sde_kms);
	sde_debugfs_core_irq_destroy(sde_kms);
}
Alan Kwongf0fd8512016-10-24 21:39:26 -0400380#else
/* no-op stubs for builds without CONFIG_DEBUG_FS */
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
}
389#endif
Clarence Ip4ce59322016-06-26 22:27:51 -0400390
/* msm_kms hook: enable vblank event delivery on the given crtc */
static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return sde_crtc_vblank(crtc, true);
}
395
/* msm_kms hook: disable vblank event delivery on the given crtc */
static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	sde_crtc_vblank(crtc, false);
}
400
/**
 * sde_kms_wait_for_frame_transfer_complete - block until every encoder on
 *	the given crtc has finished transferring the current frame
 * @kms: pointer to msm_kms structure
 * @crtc: crtc whose encoders to wait on
 *
 * No-op if the crtc is not enabled/active. -EWOULDBLOCK from the encoder
 * wait (nothing pending) is treated as success.
 */
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
				crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}
443
Veera Sundaram Sankaran5616c9b2018-03-07 14:09:17 -0800444static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
445 struct drm_crtc *crtc, bool enable)
446{
447 struct drm_device *dev;
448 struct msm_drm_private *priv;
449 struct sde_mdss_cfg *sde_cfg;
450 struct drm_plane *plane;
451 int i, ret;
452
453 dev = sde_kms->dev;
454 priv = dev->dev_private;
455 sde_cfg = sde_kms->catalog;
456
457 ret = sde_vbif_halt_xin_mask(sde_kms,
458 sde_cfg->sui_block_xin_mask, enable);
459 if (ret) {
460 SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
461 return ret;
462 }
463
464 if (enable) {
465 for (i = 0; i < priv->num_planes; i++) {
466 plane = priv->planes[i];
467 sde_plane_secure_ctrl_xin_client(plane, crtc);
468 }
469 }
470
471 return 0;
472}
473
/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @vimd: switch the stage 2 translation to this VMID.
 *
 * Marshals the SID/mask table into a physically contiguous buffer,
 * cache-flushes it, and issues the MEM_PROTECT_SD_CTRL_SWITCH scm call.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the scm error
 */
static int _sde_kms_scm_call(int vmid)
{
	struct scm_desc desc = {0};
	uint32_t num_sids;
	uint32_t *sec_sid;
	uint32_t mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_SWITCH;
	int ret = 0;

	/* This info should be queried from catalog */
	num_sids = SEC_SID_CNT;
	sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
	if (!sec_sid)
		return -ENOMEM;

	/*
	 * derive this info from device tree/catalog, this is combination of
	 * smr mask and SID for secure
	 */
	sec_sid[0] = SEC_SID_MASK_0;
	sec_sid[1] = SEC_SID_MASK_1;
	/* buffer is read by secure firmware; flush it out of the caches */
	dmac_flush_range(sec_sid, sec_sid + num_sids);

	SDE_DEBUG("calling scm_call for vmid %d", vmid);

	desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
	desc.args[0] = MDP_DEVICE_ID;
	desc.args[1] = SCM_BUFFER_PHYS(sec_sid);
	desc.args[2] = sizeof(uint32_t) * num_sids;
	desc.args[3] = vmid;

	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				mem_protect_sd_ctrl_id), &desc);
	if (ret)
		SDE_ERROR("Error:scm_call2, vmid (%lld): ret%d\n",
				desc.args[3], ret);
	SDE_EVT32(mem_protect_sd_ctrl_id,
			desc.args[0], desc.args[3], num_sids,
			sec_sid[0], sec_sid[1], ret);

	kfree(sec_sid);
	return ret;
}
520
521static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms)
522{
523 u32 ret = 0;
524
525 if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
526 goto end;
527
528 /* detach_all_contexts */
529 ret = sde_kms_mmu_detach(sde_kms, false);
530 if (ret) {
531 SDE_ERROR("failed to detach all cb ret:%d\n", ret);
532 goto end;
533 }
534
535 ret = _sde_kms_scm_call(VMID_CP_SEC_DISPLAY);
536 if (ret)
537 goto end;
538
539end:
540 return ret;
541}
542
543static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms)
544{
545 u32 ret = 0;
546
547 if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
548 goto end;
549
550 ret = _sde_kms_scm_call(VMID_CP_PIXEL);
551 if (ret)
552 goto end;
553
554 /* attach_all_contexts */
555 ret = sde_kms_mmu_attach(sde_kms, false);
556 if (ret) {
557 SDE_ERROR("failed to attach all cb ret:%d\n", ret);
558 goto end;
559 }
560
561end:
562 return ret;
563}
564
565static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms)
566{
567 u32 ret = 0;
568
569 if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
570 goto end;
571
572 /* detach secure_context */
573 ret = sde_kms_mmu_detach(sde_kms, true);
574 if (ret) {
575 SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
576 goto end;
577 }
578
579 ret = _sde_kms_scm_call(VMID_CP_CAMERA_PREVIEW);
580 if (ret)
581 goto end;
582
583end:
584 return ret;
585}
586
587static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms)
588{
589 u32 ret = 0;
590
591 if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
592 goto end;
593
594 ret = _sde_kms_scm_call(VMID_CP_PIXEL);
595 if (ret)
596 goto end;
597
598 ret = sde_kms_mmu_attach(sde_kms, true);
599 if (ret) {
600 SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
601 goto end;
602 }
603
604end:
605 return ret;
606}
607
/**
 * _sde_kms_sui_misr_ctrl - enable/disable sui misr capture around a
 *	secure-UI transition
 * @sde_kms: pointer to sde_kms structure
 * @crtc: crtc on which misr is configured
 * @enable: true to set up misr and halt xin clients, false to undo both
 *
 * On enable: power vote -> misr setup -> xin halt (power vote is dropped
 * again if the halt fails). On disable the same steps run in reverse.
 *
 * Return: 0 on success, negative error code on failure
 */
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (enable) {
		ret = sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, true);
		if (ret) {
			SDE_ERROR("failed to enable resource, ret:%d\n", ret);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			/* undo the power vote taken above */
			sde_power_resource_enable(&priv->phandle,
					sde_kms->core_client, false);
			return ret;
		}

	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, false);
	}

	return 0;
}
641
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800642static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
643 bool post_commit)
644{
645 struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
646 int old_smmu_state = smmu_state->state;
647 int ret = 0;
648
649 if (!sde_kms || !crtc) {
650 SDE_ERROR("invalid argument(s)\n");
651 return -EINVAL;
652 }
653
654 SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
Veera Sundaram Sankaran61e4fba2018-02-27 17:59:22 -0800655 post_commit, smmu_state->sui_misr_state,
656 SDE_EVTLOG_FUNC_ENTRY);
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800657
658 if ((!smmu_state->transition_type) ||
659 ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
660 /* Bail out */
661 return 0;
662
Veera Sundaram Sankaran61e4fba2018-02-27 17:59:22 -0800663 /* enable sui misr if requested, before the transition */
664 if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
665 ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
666 if (ret)
667 goto end;
668 }
669
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800670 mutex_lock(&sde_kms->secure_transition_lock);
671 switch (smmu_state->state) {
672 /* Secure UI use case enable */
673 case DETACH_ALL_REQ:
674 ret = _sde_kms_detach_all_cb(sde_kms);
675 if (!ret)
676 smmu_state->state = DETACHED;
677 break;
678
679 /* Secure UI use case disable */
680 case ATTACH_ALL_REQ:
681 ret = _sde_kms_attach_all_cb(sde_kms);
682 if (!ret)
683 smmu_state->state = ATTACHED;
684 break;
685
686 /* Secure preview enable */
687 case DETACH_SEC_REQ:
688 ret = _sde_kms_detach_sec_cb(sde_kms);
689 if (!ret)
690 smmu_state->state = DETACHED_SEC;
691 break;
692
693 /* Secure preview disable */
694 case ATTACH_SEC_REQ:
695 ret = _sde_kms_attach_sec_cb(sde_kms);
696 if (!ret)
697 smmu_state->state = ATTACHED;
698 break;
699
700 default:
701 SDE_ERROR("crtc:%d invalid smmu state:%d transition type:%d\n",
702 DRMID(crtc), smmu_state->state,
703 smmu_state->transition_type);
704 ret = -EINVAL;
705 break;
706 }
707 mutex_unlock(&sde_kms->secure_transition_lock);
708
Veera Sundaram Sankaran61e4fba2018-02-27 17:59:22 -0800709 /* disable sui misr if requested, after the transition */
710 if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
711 ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
712 if (ret)
713 goto end;
714 }
715
716end:
717 smmu_state->sui_misr_state = NONE;
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800718 smmu_state->transition_type = NONE;
719 smmu_state->transition_error = ret ? true : false;
720
721 SDE_DEBUG("crtc:%d, old_state %d new_state %d, ret %d\n",
722 DRMID(crtc), old_smmu_state, smmu_state->state, ret);
723 SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
724 smmu_state->transition_error, ret,
725 SDE_EVTLOG_FUNC_EXIT);
726
727 return ret;
728}
729
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700730static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
731 struct drm_atomic_state *state)
732{
733 struct drm_crtc *crtc;
734 struct drm_crtc_state *old_crtc_state;
735
736 struct drm_plane *plane;
737 struct drm_plane_state *plane_state;
738 struct sde_kms *sde_kms = to_sde_kms(kms);
739 struct drm_device *dev = sde_kms->dev;
740 int i, ops = 0, ret = 0;
741 bool old_valid_fb = false;
742
743 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
744 if (!crtc->state || !crtc->state->active)
745 continue;
746 /*
747 * It is safe to assume only one active crtc,
748 * and compatible translation modes on the
749 * planes staged on this crtc.
750 * otherwise validation would have failed.
751 * For this CRTC,
752 */
753
754 /*
755 * 1. Check if old state on the CRTC has planes
756 * staged with valid fbs
757 */
758 for_each_plane_in_state(state, plane, plane_state, i) {
759 if (!plane_state->crtc)
760 continue;
761 if (plane_state->fb) {
762 old_valid_fb = true;
763 break;
764 }
765 }
766
767 /*
768 * 2.Get the operations needed to be performed before
769 * secure transition can be initiated.
770 */
771 ops = sde_crtc_get_secure_transition_ops(crtc,
Veera Sundaram Sankaranfd792402017-10-13 12:50:41 -0700772 old_crtc_state, old_valid_fb);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700773 if (ops < 0) {
774 SDE_ERROR("invalid secure operations %x\n", ops);
775 return ops;
776 }
777
778 if (!ops)
779 goto no_ops;
780
781 SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
Veera Sundaram Sankaranfd792402017-10-13 12:50:41 -0700782 crtc->base.id, ops, crtc->state);
783 SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700784
785 /* 3. Perform operations needed for secure transition */
786 if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
787 SDE_DEBUG("wait_for_transfer_done\n");
788 sde_kms_wait_for_frame_transfer_complete(kms, crtc);
789 }
790 if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
791 SDE_DEBUG("cleanup planes\n");
792 drm_atomic_helper_cleanup_planes(dev, state);
793 }
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800794 if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700795 SDE_DEBUG("secure ctrl\n");
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -0800796 _sde_kms_secure_ctrl(sde_kms, crtc, false);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700797 }
798 if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
799 SDE_DEBUG("prepare planes %d",
800 crtc->state->plane_mask);
801 drm_atomic_crtc_for_each_plane(plane,
802 crtc) {
803 const struct drm_plane_helper_funcs *funcs;
804
805 plane_state = plane->state;
806 funcs = plane->helper_private;
807
808 SDE_DEBUG("psde:%d FB[%u]\n",
809 plane->base.id,
810 plane->fb->base.id);
811 if (!funcs)
812 continue;
813
814 if (funcs->prepare_fb(plane, plane_state)) {
815 ret = funcs->prepare_fb(plane,
816 plane_state);
817 if (ret)
818 return ret;
819 }
820 }
821 }
Veera Sundaram Sankaranfd792402017-10-13 12:50:41 -0700822 SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700823 SDE_DEBUG("secure operations completed\n");
824 }
825
826no_ops:
827 return 0;
828}
829
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -0700830static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
831 unsigned int size)
832{
833 unsigned long pfn_start, pfn_end, pfn_idx;
834 int ret = 0;
835
836 if (!mem_addr || !size)
837 SDE_ERROR("invalid params\n");
838
839 pfn_start = mem_addr >> PAGE_SHIFT;
840 pfn_end = (mem_addr + size) >> PAGE_SHIFT;
841
842 ret = memblock_free(mem_addr, size);
843 if (ret) {
844 SDE_ERROR("continuous splash memory free failed:%d\n", ret);
845 return ret;
846 }
847 for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
848 free_reserved_page(pfn_to_page(pfn_idx));
849
850 return ret;
851
852}
853
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -0800854static int _sde_kms_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
855 struct sde_splash_data *data)
856{
857 int ret = 0;
858
859 if (!mmu || !data)
860 return -EINVAL;
861
862 ret = mmu->funcs->one_to_one_map(mmu, data->splash_base,
863 data->splash_base, data->splash_size,
864 IOMMU_READ | IOMMU_NOEXEC);
865 if (ret)
866 SDE_ERROR("Splash smmu map failed: %d\n", ret);
867
868 return ret;
869}
870
871static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms)
872{
873 struct sde_splash_data *data;
874 struct msm_mmu *mmu;
875 int rc = 0;
876
877 if (!sde_kms)
878 return -EINVAL;
879
880 data = &sde_kms->splash_data;
881 if (!data) {
882 SDE_ERROR("Invalid splash data\n");
883 return -EINVAL;
884 }
885
886 if (!sde_kms->aspace[0]) {
887 SDE_ERROR("aspace not found for sde kms node\n");
888 return -EINVAL;
889 }
890
891 mmu = sde_kms->aspace[0]->mmu;
892 if (!mmu) {
893 SDE_ERROR("mmu not found for aspace\n");
894 return -EINVAL;
895 }
896
897 if (mmu->funcs && mmu->funcs->one_to_one_unmap)
898 mmu->funcs->one_to_one_unmap(mmu, data->splash_base,
899 data->splash_size);
900
901 return rc;
902}
903
/**
 * sde_kms_prepare_commit - msm_kms hook invoked before an atomic commit
 *	is pushed to hardware
 * @kms: pointer to msm_kms structure
 * @state: atomic state being committed
 *
 * Votes the SDE power resource on (the vote is not dropped here - it is
 * presumably released later in the commit flow; verify against the
 * complete-commit path), notifies each encoder on the affected crtcs,
 * and runs any pending secure-transition preparation.
 */
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, rc = 0;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
			true);
	if (rc) {
		SDE_ERROR("failed to enable power resource %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		return;
	}

	/* raise the reg bus vote once, on the very first kickoff */
	if (sde_kms->first_kickoff) {
		sde_power_scale_reg_bus(&priv->phandle, sde_kms->core_client,
			VOTE_INDEX_HIGH, false);
		sde_kms->first_kickoff = false;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				head) {
			if (encoder->crtc != crtc)
				continue;

			sde_encoder_prepare_commit(encoder);
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing preparation for secure
	 * transitions prepare below if any transtions is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);
}
955
956static void sde_kms_commit(struct msm_kms *kms,
957 struct drm_atomic_state *old_state)
958{
Alan Kwong1124f1f2017-11-10 18:14:39 -0500959 struct sde_kms *sde_kms;
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700960 struct drm_crtc *crtc;
961 struct drm_crtc_state *old_crtc_state;
962 int i;
963
Alan Kwong1124f1f2017-11-10 18:14:39 -0500964 if (!kms || !old_state)
965 return;
966 sde_kms = to_sde_kms(kms);
967
968 if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
969 SDE_ERROR("power resource is not enabled\n");
970 return;
971 }
972
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700973 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
974 if (crtc->state->active) {
975 SDE_EVT32(DRMID(crtc));
Clarence Ip569d5af2017-10-14 21:09:01 -0400976 sde_crtc_commit_kickoff(crtc, old_crtc_state);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700977 }
978 }
979}
980
/*
 * _sde_kms_release_splash_resource - release bootloader splash resources
 * once at least one crtc in the committed state is active.
 * @sde_kms: Pointer to sde kms structure
 * @old_state: Atomic state that was just committed
 *
 * No-op until resource_handoff_pending is set; drops the continuous-splash
 * bus/power votes and frees/unmaps the splash memory region.
 */
static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool primary_crtc_active = false;
	struct msm_drm_private *priv;
	int i, rc = 0;

	priv = sde_kms->dev->dev_private;

	if (!sde_kms->splash_data.resource_handoff_pending)
		return;

	SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
	/* handoff only once some crtc has gone active after this commit */
	for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
		if (crtc->state->active)
			primary_crtc_active = true;
		SDE_EVT32(crtc->base.id, crtc->state->active);
	}

	if (!primary_crtc_active) {
		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2);
		return;
	}

	sde_kms->splash_data.resource_handoff_pending = false;

	if (sde_kms->splash_data.cont_splash_en) {
		SDE_DEBUG("disabling cont_splash feature\n");
		sde_kms->splash_data.cont_splash_en = false;

		/* drop the data bus quota votes held for continuous splash */
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle,
				sde_kms->core_client,
				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		/* release the power vote taken on behalf of the bootloader */
		sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
				false);
	}

	if (sde_kms->splash_data.splash_base) {
		/* unmap before releasing the underlying memory */
		_sde_kms_splash_smmu_unmap(sde_kms);

		rc = _sde_kms_release_splash_buffer(
				sde_kms->splash_data.splash_base,
				sde_kms->splash_data.splash_size);
		if (rc)
			pr_err("failed to release splash memory\n");
		sde_kms->splash_data.splash_base = 0;
		sde_kms->splash_data.splash_size = 0;
	}
}
1036
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001037static void sde_kms_complete_commit(struct msm_kms *kms,
1038 struct drm_atomic_state *old_state)
1039{
1040 struct sde_kms *sde_kms;
1041 struct msm_drm_private *priv;
1042 struct drm_crtc *crtc;
1043 struct drm_crtc_state *old_crtc_state;
Raviteja Tamatam68892de2017-06-20 04:47:19 +05301044 struct drm_connector *connector;
1045 struct drm_connector_state *old_conn_state;
1046 int i, rc = 0;
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001047
1048 if (!kms || !old_state)
1049 return;
1050 sde_kms = to_sde_kms(kms);
1051
1052 if (!sde_kms->dev || !sde_kms->dev->dev_private)
1053 return;
1054 priv = sde_kms->dev->dev_private;
1055
Alan Kwong1124f1f2017-11-10 18:14:39 -05001056 if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
1057 SDE_ERROR("power resource is not enabled\n");
1058 return;
1059 }
1060
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -08001061 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001062 sde_crtc_complete_commit(crtc, old_crtc_state);
1063
Veera Sundaram Sankaran8fd692a2018-02-26 17:49:14 -08001064 /* complete secure transitions if any */
1065 if (sde_kms->smmu_state.transition_type == POST_COMMIT)
1066 _sde_kms_secure_ctrl(sde_kms, crtc, true);
1067 }
1068
Raviteja Tamatam68892de2017-06-20 04:47:19 +05301069 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
1070 struct sde_connector *c_conn;
1071
1072 c_conn = to_sde_connector(connector);
1073 if (!c_conn->ops.post_kickoff)
1074 continue;
1075 rc = c_conn->ops.post_kickoff(connector);
1076 if (rc) {
1077 pr_err("Connector Post kickoff failed rc=%d\n",
1078 rc);
1079 }
1080 }
1081
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001082 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
1083
Gopikrishnaiah Anandanb38d3292018-02-28 19:25:15 -08001084 _sde_kms_release_splash_resource(sde_kms, old_state);
1085
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -07001086 SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
1087}
1088
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001089static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
Abhijit Kulkarni40e38162016-06-26 22:12:09 -04001090 struct drm_crtc *crtc)
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001091{
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001092 struct drm_encoder *encoder;
Narendra Muppallaec11a0a2017-06-15 15:35:17 -07001093 struct drm_device *dev;
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001094 int ret;
1095
Alan Kwongf34ef982016-09-29 20:53:53 -04001096 if (!kms || !crtc || !crtc->state) {
1097 SDE_ERROR("invalid params\n");
1098 return;
1099 }
1100
Narendra Muppallaec11a0a2017-06-15 15:35:17 -07001101 dev = crtc->dev;
1102
Alan Kwongf34ef982016-09-29 20:53:53 -04001103 if (!crtc->state->enable) {
1104 SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
1105 return;
1106 }
1107
1108 if (!crtc->state->active) {
1109 SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
1110 return;
1111 }
1112
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001113 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1114 if (encoder->crtc != crtc)
1115 continue;
1116 /*
Dhaval Patel6c666622017-03-21 23:02:59 -07001117 * Wait for post-flush if necessary to delay before
1118 * plane_cleanup. For example, wait for vsync in case of video
1119 * mode panels. This may be a no-op for command mode panels.
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001120 */
Dhaval Patel6c666622017-03-21 23:02:59 -07001121 SDE_EVT32_VERBOSE(DRMID(crtc));
Jeykumar Sankarandfaeec92017-06-06 15:21:51 -07001122 ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001123 if (ret && ret != -EWOULDBLOCK) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04001124 SDE_ERROR("wait for commit done returned %d\n", ret);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001125 break;
1126 }
Dhaval Patelc68896d2018-06-13 13:55:46 -07001127
1128 sde_crtc_complete_flip(crtc, NULL);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001129 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001130}
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001131
Clarence Ip24f80662016-06-13 19:05:32 -04001132static void sde_kms_prepare_fence(struct msm_kms *kms,
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001133 struct drm_atomic_state *old_state)
Clarence Ip24f80662016-06-13 19:05:32 -04001134{
1135 struct drm_crtc *crtc;
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001136 struct drm_crtc_state *old_crtc_state;
1137 int i, rc;
Clarence Ip24f80662016-06-13 19:05:32 -04001138
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001139 if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
1140 SDE_ERROR("invalid argument(s)\n");
1141 return;
1142 }
1143
1144retry:
1145 /* attempt to acquire ww mutex for connection */
1146 rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
1147 old_state->acquire_ctx);
1148
1149 if (rc == -EDEADLK) {
1150 drm_modeset_backoff(old_state->acquire_ctx);
1151 goto retry;
1152 }
1153
1154 /* old_state actually contains updated crtc pointers */
Veera Sundaram Sankaran675ff622017-06-21 21:44:46 -07001155 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1156 if (crtc->state->active)
1157 sde_crtc_prepare_commit(crtc, old_crtc_state);
1158 }
Clarence Ip24f80662016-06-13 19:05:32 -04001159}
1160
/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 *
 * Allocates and fills the dsi/wb/dp display caches. On allocation failure
 * the already-allocated lists are unwound in reverse order via the goto
 * ladder below.
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		/* count may shrink to the number of active displays */
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);
	}
	return 0;

	/* unwind in reverse order of allocation */
exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;
	return rc;
}
1239
1240/**
1241 * _sde_kms_release_displays - release cache of underlying display handles
1242 * @sde_kms: Pointer to sde kms structure
1243 */
1244static void _sde_kms_release_displays(struct sde_kms *sde_kms)
1245{
1246 if (!sde_kms) {
1247 SDE_ERROR("invalid sde kms\n");
1248 return;
1249 }
1250
1251 kfree(sde_kms->wb_displays);
1252 sde_kms->wb_displays = NULL;
1253 sde_kms->wb_display_count = 0;
1254
1255 kfree(sde_kms->dsi_displays);
1256 sde_kms->dsi_displays = NULL;
1257 sde_kms->dsi_display_count = 0;
1258}
1259
/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 * for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 *
 * Iterates the cached dsi/wb/dp display lists (filled by
 * _sde_kms_get_displays()) and creates a drm encoder, bridge and connector
 * for each. A failure on one display logs an error and skips to the next;
 * the function itself always returns 0.
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	/* connector callbacks for native DSI panels */
	static const struct sde_connector_ops dsi_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.put_modes = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.post_kickoff = dsi_conn_post_kickoff,
		.check_status = dsi_display_check_status,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = dsi_display_cmd_transfer,
		.cont_splash_config = dsi_display_cont_splash_config,
		.get_panel_vfp = dsi_display_get_panel_vfp,
	};
	/* connector callbacks for writeback (virtual) displays */
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.set_info_blob = sde_wb_connector_set_info_blob,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL,
		.check_status = NULL,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.get_panel_vfp = NULL,
	};
	/* connector callbacks for DisplayPort */
	static const struct sde_connector_ops dp_ops = {
		.set_info_blob = dp_connnector_set_info_blob,
		.post_init  = dp_connector_post_init,
		.detect     = dp_connector_detect,
		.get_modes  = dp_connector_get_modes,
		.mode_valid = dp_connector_mode_valid,
		.get_info   = dp_connector_get_info,
		.get_mode_info  = dp_connector_get_mode_info,
		.post_open  = dp_connector_post_open,
		.check_status = NULL,
		.config_hdr = dp_connector_config_hdr,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.get_panel_vfp = NULL,
		.mode_needs_full_range = dp_connector_mode_needs_full_range,
		.mode_is_cea_mode = dp_connector_mode_is_cea_mode,
		.get_csc_type = dp_connector_get_csc_type,
	};
	/* connector callbacks for DSI panels behind an external bridge */
	static const struct sde_connector_ops ext_bridge_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_ext_bridge_get_info,
		.soft_reset = dsi_display_soft_reset,
		.clk_ctrl = dsi_display_clk_ctrl,
		.get_mode_info = dsi_conn_ext_bridge_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	/* never create more encoders than the priv->encoders array holds */
	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
				sde_kms->dp_display_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d", max_encoders);
	}

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		if (!dsi_display_has_ext_bridge(display)) {
			memset(&info, 0x0, sizeof(info));
			rc = dsi_display_get_info(&info, display);
			if (rc) {
				SDE_ERROR("dsi get_info %d failed\n", i);
				continue;
			}

			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("encoder init failed for dsi %d\n",
						i);
				continue;
			}

			rc = dsi_display_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dsi bridge %d init failed, %d\n",
						i, rc);
				sde_encoder_destroy(encoder);
				continue;
			}

			connector = sde_connector_init(dev,
						encoder,
						NULL,
						display,
						&dsi_ops,
						DRM_CONNECTOR_POLL_HPD,
						DRM_MODE_CONNECTOR_DSI);
			if (connector) {
				priv->encoders[priv->num_encoders++] = encoder;
			} else {
				/* unwind bridge and encoder on failure */
				SDE_ERROR("dsi %d connector init failed\n", i);
				dsi_display_drm_bridge_deinit(display);
				sde_encoder_destroy(encoder);
			}
		} else {
			memset(&info, 0x0, sizeof(info));
			rc = dsi_display_ext_bridge_get_info(&info, display);
			if (rc) {
				SDE_ERROR("ext get_info %d failed\n", i);
				continue;
			}

			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("encoder init failed for ext %d\n",
						i);
				continue;
			}

			rc = dsi_display_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dsi bridge %d init failed for ext\n",
						i);
				sde_encoder_destroy(encoder);
				continue;
			}

			connector = sde_connector_init(dev,
					encoder,
					NULL,
					display,
					&ext_bridge_ops,
					DRM_CONNECTOR_POLL_HPD,
					DRM_MODE_CONNECTOR_DSI);
			if (connector) {
				priv->encoders[priv->num_encoders++] = encoder;
			} else {
				SDE_ERROR("connector init %d failed for ext\n",
						i);
				dsi_display_drm_bridge_deinit(display);
				sde_encoder_destroy(encoder);
				continue;
			}

			/* external bridge needs both encoder and connector */
			rc = dsi_display_drm_ext_bridge_init(display,
					encoder, connector);
			if (rc) {
				struct drm_connector *conn = connector;

				SDE_ERROR("ext bridge %d init failed, %d\n",
						i, rc);
				conn->funcs->destroy(connector);
				dsi_display_drm_bridge_deinit(display);
				sde_encoder_destroy(encoder);
				continue;
			}
		}
	}

	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(&info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}
	/* dp */
	for (i = 0; i < sde_kms->dp_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(&info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
					encoder,
					NULL,
					display,
					&dp_ops,
					DRM_CONNECTOR_POLL_HPD,
					DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	return 0;
}
1539
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001540static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
1541{
1542 struct msm_drm_private *priv;
1543 int i;
1544
1545 if (!sde_kms) {
1546 SDE_ERROR("invalid sde_kms\n");
1547 return;
1548 } else if (!sde_kms->dev) {
1549 SDE_ERROR("invalid dev\n");
1550 return;
1551 } else if (!sde_kms->dev->dev_private) {
1552 SDE_ERROR("invalid dev_private\n");
1553 return;
1554 }
1555 priv = sde_kms->dev->dev_private;
1556
1557 for (i = 0; i < priv->num_crtcs; i++)
1558 priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001559 priv->num_crtcs = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001560
1561 for (i = 0; i < priv->num_planes; i++)
1562 priv->planes[i]->funcs->destroy(priv->planes[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001563 priv->num_planes = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001564
1565 for (i = 0; i < priv->num_connectors; i++)
1566 priv->connectors[i]->funcs->destroy(priv->connectors[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001567 priv->num_connectors = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001568
1569 for (i = 0; i < priv->num_encoders; i++)
1570 priv->encoders[i]->funcs->destroy(priv->encoders[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001571 priv->num_encoders = 0;
1572
1573 _sde_kms_release_displays(sde_kms);
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001574}
1575
/*
 * _sde_kms_drm_obj_init - create the drm object topology for this device:
 * irq domain, displays (encoders/bridges/connectors), planes and crtcs,
 * and wire up the possible_crtcs masks.
 * Returns 0 on success, negative error code otherwise.
 */
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;

	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;

	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	/* crtc count is bounded by both mixers and encoders */
	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		/* cursor pipes and surplus pipes become overlay planes */
		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		/* record multirect-capable pipes for virtual plane setup */
		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
			sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
			(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	/* each crtc needs a primary plane to have been created */
	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
1685
Alan Kwong4dd64c82017-02-04 18:41:51 -08001686/**
Dhaval Patel2a3c37a2017-10-25 12:30:36 -07001687 * sde_kms_timeline_status - provides current timeline status
1688 * This API should be called without mode config lock.
1689 * @dev: Pointer to drm device
1690 */
1691void sde_kms_timeline_status(struct drm_device *dev)
1692{
1693 struct drm_crtc *crtc;
1694 struct drm_connector *conn;
1695
1696 if (!dev) {
1697 SDE_ERROR("invalid drm device node\n");
1698 return;
1699 }
1700
1701 drm_for_each_crtc(crtc, dev)
1702 sde_crtc_timeline_status(crtc);
1703
Kalyan Thota436abff2018-06-15 16:23:16 +05301704 if (mutex_is_locked(&dev->mode_config.mutex)) {
1705 /*
1706 *Probably locked from last close dumping status anyway
1707 */
1708 SDE_ERROR("dumping conn_timeline without mode_config lock\n");
1709 drm_for_each_connector(conn, dev)
1710 sde_conn_timeline_status(conn);
1711 return;
1712 }
1713
Dhaval Patel2a3c37a2017-10-25 12:30:36 -07001714 mutex_lock(&dev->mode_config.mutex);
1715 drm_for_each_connector(conn, dev)
1716 sde_conn_timeline_status(conn);
1717 mutex_unlock(&dev->mode_config.mutex);
1718}
1719
/**
 * struct sde_kms_fbo_fb - framebuffer creation list
 * @list: list node linking this entry into the owning object's fb_list
 * @fb: Pointer to framebuffer attached to framebuffer object
 */
struct sde_kms_fbo_fb {
	struct list_head list;
	struct drm_framebuffer *fb;
};
1729
/*
 * sde_kms_fbo_create_fb - create a drm framebuffer backed by the given
 * framebuffer object and track it on the object's fb_list.
 * @dev: Pointer to drm device
 * @fbo: Pointer to framebuffer object supplying format/layout/buffers
 * Returns the new framebuffer, or NULL on failure.
 */
struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
		struct sde_kms_fbo *fbo)
{
	struct drm_framebuffer *fb = NULL;
	struct sde_kms_fbo_fb *fbo_fb;
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	u32 base_offset = 0;
	int i, ret;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return NULL;
	}

	fbo_fb = kzalloc(sizeof(struct sde_kms_fbo_fb), GFP_KERNEL);
	if (!fbo_fb)
		return NULL;

	/* build the fb creation command from the object's cached layout */
	mode_cmd.pixel_format = fbo->pixel_format;
	mode_cmd.width = fbo->width;
	mode_cmd.height = fbo->height;
	mode_cmd.flags = fbo->flags;

	/* planes are packed back to back in the buffer */
	for (i = 0; i < fbo->nplane; i++) {
		mode_cmd.offsets[i] = base_offset;
		mode_cmd.pitches[i] = fbo->layout.plane_pitch[i];
		mode_cmd.modifier[i] = fbo->modifier[i];
		base_offset += fbo->layout.plane_size[i];
		SDE_DEBUG("offset[%d]:%x\n", i, mode_cmd.offsets[i]);
	}

	fb = msm_framebuffer_init(dev, &mode_cmd, fbo->bo);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		fb = NULL;
		SDE_ERROR("failed to allocate fb %d\n", ret);
		goto fail;
	}

	/* need to take one reference for gem object */
	for (i = 0; i < fbo->nplane; i++)
		drm_gem_object_reference(fbo->bo[i]);

	SDE_DEBUG("register private fb:%d\n", fb->base.id);

	/* keep a reference on fb_list so sde_kms_fbo_destroy can drop it */
	INIT_LIST_HEAD(&fbo_fb->list);
	fbo_fb->fb = fb;
	drm_framebuffer_reference(fbo_fb->fb);
	list_add_tail(&fbo_fb->list, &fbo->fb_list);

	return fb;

fail:
	kfree(fbo_fb);
	return NULL;
}
1786
/*
 * sde_kms_fbo_destroy - release all resources held by a framebuffer
 * object: tracked framebuffers, gem buffer references, the dma-buf, and
 * the ion handle (if any).
 * @fbo: Pointer to framebuffer object to tear down
 */
static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct sde_kms_fbo_fb *curr, *next;
	int i;

	if (!fbo) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	dev = fbo->dev;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	priv = dev->dev_private;

	if (!priv->kms) {
		SDE_ERROR("invalid kms handle\n");
		return;
	}
	sde_kms = to_sde_kms(priv->kms);

	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", fbo->width, fbo->height,
			fbo->pixel_format >> 0, fbo->pixel_format >> 8,
			fbo->pixel_format >> 16, fbo->pixel_format >> 24,
			fbo->modifier[0], fbo->flags);

	/* drop the reference taken per fb in sde_kms_fbo_create_fb() */
	list_for_each_entry_safe(curr, next, &fbo->fb_list, list) {
		SDE_DEBUG("unregister private fb:%d\n", curr->fb->base.id);
		drm_framebuffer_unregister_private(curr->fb);
		drm_framebuffer_unreference(curr->fb);
		list_del(&curr->list);
		kfree(curr);
	}

	/* release the per-plane gem object references */
	for (i = 0; i < fbo->layout.num_planes; i++) {
		if (fbo->bo[i]) {
			mutex_lock(&dev->struct_mutex);
			drm_gem_object_unreference(fbo->bo[i]);
			mutex_unlock(&dev->struct_mutex);
			fbo->bo[i] = NULL;
		}
	}

	if (fbo->dma_buf) {
		dma_buf_put(fbo->dma_buf);
		fbo->dma_buf = NULL;
	}

	if (sde_kms->iclient && fbo->ihandle) {
		ion_free(sde_kms->iclient, fbo->ihandle);
		fbo->ihandle = NULL;
	}
}
1845
Clarence Ipd02440b2017-05-21 18:10:01 -04001846static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
1847 uint32_t flags)
1848{
1849 if (msm_obj)
1850 msm_obj->flags |= flags;
1851}
1852
Alan Kwong4dd64c82017-02-04 18:41:51 -08001853struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
1854 u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
1855{
1856 struct msm_drm_private *priv;
1857 struct sde_kms *sde_kms;
1858 struct sde_kms_fbo *fbo;
1859 int i, ret;
1860
1861 if (!dev || !dev->dev_private) {
1862 SDE_ERROR("invalid drm device node\n");
1863 return NULL;
1864 }
1865 priv = dev->dev_private;
1866
1867 if (!priv->kms) {
1868 SDE_ERROR("invalid kms handle\n");
1869 return NULL;
1870 }
1871 sde_kms = to_sde_kms(priv->kms);
1872
1873 SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", width, height,
1874 pixel_format >> 0, pixel_format >> 8,
1875 pixel_format >> 16, pixel_format >> 24,
1876 modifier[0], flags);
1877
1878 fbo = kzalloc(sizeof(struct sde_kms_fbo), GFP_KERNEL);
1879 if (!fbo)
1880 return NULL;
1881
1882 atomic_set(&fbo->refcount, 0);
1883 INIT_LIST_HEAD(&fbo->fb_list);
1884 fbo->dev = dev;
1885 fbo->width = width;
1886 fbo->height = height;
1887 fbo->pixel_format = pixel_format;
1888 fbo->flags = flags;
1889 for (i = 0; i < ARRAY_SIZE(fbo->modifier); i++)
1890 fbo->modifier[i] = modifier[i];
1891 fbo->nplane = drm_format_num_planes(fbo->pixel_format);
1892 fbo->fmt = sde_get_sde_format_ext(fbo->pixel_format, fbo->modifier,
1893 fbo->nplane);
1894 if (!fbo->fmt) {
1895 ret = -EINVAL;
1896 SDE_ERROR("failed to find pixel format\n");
1897 goto done;
1898 }
1899
1900 ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
Narendra Muppalla58a64e22017-07-24 10:54:47 -07001901 &fbo->layout, fbo->layout.plane_pitch);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001902 if (ret) {
1903 SDE_ERROR("failed to get plane sizes\n");
1904 goto done;
1905 }
1906
1907 /* allocate backing buffer object */
Alan Kwong54125bb2017-02-26 16:01:36 -08001908 if (sde_kms->iclient) {
1909 u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
Alan Kwong3f2a5152017-08-25 16:19:43 -04001910 ION_HEAP(ION_SECURE_HEAP_ID) :
Alan Kwong54125bb2017-02-26 16:01:36 -08001911 ION_HEAP(ION_SYSTEM_HEAP_ID);
Alan Kwong3f2a5152017-08-25 16:19:43 -04001912 u32 iflags = fbo->flags & DRM_MODE_FB_SECURE ?
1913 (ION_FLAG_SECURE | ION_FLAG_CP_PIXEL) : 0;
Alan Kwong54125bb2017-02-26 16:01:36 -08001914
1915 fbo->ihandle = ion_alloc(sde_kms->iclient,
Alan Kwong3f2a5152017-08-25 16:19:43 -04001916 fbo->layout.total_size, SZ_4K, heap_id, iflags);
Alan Kwong54125bb2017-02-26 16:01:36 -08001917 if (IS_ERR_OR_NULL(fbo->ihandle)) {
1918 SDE_ERROR("failed to alloc ion memory\n");
1919 ret = PTR_ERR(fbo->ihandle);
1920 fbo->ihandle = NULL;
1921 goto done;
1922 }
1923
1924 fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
1925 fbo->ihandle);
1926 if (IS_ERR(fbo->dma_buf)) {
1927 SDE_ERROR("failed to share ion memory\n");
1928 ret = -ENOMEM;
1929 fbo->dma_buf = NULL;
1930 goto done;
1931 }
1932
1933 fbo->bo[0] = dev->driver->gem_prime_import(dev,
1934 fbo->dma_buf);
1935 if (IS_ERR(fbo->bo[0])) {
1936 SDE_ERROR("failed to import ion memory\n");
1937 ret = PTR_ERR(fbo->bo[0]);
1938 fbo->bo[0] = NULL;
1939 goto done;
1940 }
Clarence Ipd02440b2017-05-21 18:10:01 -04001941
1942 /* insert extra bo flags */
1943 sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001944 } else {
1945 mutex_lock(&dev->struct_mutex);
1946 fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
Clarence Ipd02440b2017-05-21 18:10:01 -04001947 MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001948 if (IS_ERR(fbo->bo[0])) {
1949 mutex_unlock(&dev->struct_mutex);
1950 SDE_ERROR("failed to new gem buffer\n");
1951 ret = PTR_ERR(fbo->bo[0]);
1952 fbo->bo[0] = NULL;
1953 goto done;
1954 }
Alan Kwong4dd64c82017-02-04 18:41:51 -08001955 mutex_unlock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001956 }
1957
Alan Kwong54125bb2017-02-26 16:01:36 -08001958 mutex_lock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001959 for (i = 1; i < fbo->layout.num_planes; i++) {
1960 fbo->bo[i] = fbo->bo[0];
1961 drm_gem_object_reference(fbo->bo[i]);
1962 }
1963 mutex_unlock(&dev->struct_mutex);
1964
1965done:
1966 if (ret) {
1967 sde_kms_fbo_destroy(fbo);
1968 kfree(fbo);
1969 fbo = NULL;
1970 } else {
1971 sde_kms_fbo_reference(fbo);
1972 }
1973
1974 return fbo;
1975}
1976
1977int sde_kms_fbo_reference(struct sde_kms_fbo *fbo)
1978{
1979 if (!fbo) {
1980 SDE_ERROR("invalid parameters\n");
1981 return -EINVAL;
1982 }
1983
1984 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1985 atomic_read(&fbo->refcount));
1986
1987 atomic_inc(&fbo->refcount);
1988
1989 return 0;
1990}
1991
1992void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo)
1993{
1994 if (!fbo) {
1995 SDE_ERROR("invalid parameters\n");
1996 return;
1997 }
1998
1999 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
2000 atomic_read(&fbo->refcount));
2001
2002 if (!atomic_read(&fbo->refcount)) {
2003 SDE_ERROR("invalid refcount\n");
2004 return;
2005 } else if (atomic_dec_return(&fbo->refcount) == 0) {
2006 sde_kms_fbo_destroy(fbo);
2007 }
2008}
2009
Alan Kwong5a3ac752016-10-16 01:02:35 -04002010static int sde_kms_postinit(struct msm_kms *kms)
2011{
2012 struct sde_kms *sde_kms = to_sde_kms(kms);
2013 struct drm_device *dev;
Dhaval Patel91399a52017-11-27 22:21:27 -08002014 struct drm_crtc *crtc;
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07002015 int rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04002016
2017 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
2018 SDE_ERROR("invalid sde_kms\n");
2019 return -EINVAL;
2020 }
2021
2022 dev = sde_kms->dev;
2023
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07002024 rc = _sde_debugfs_init(sde_kms);
2025 if (rc)
2026 SDE_ERROR("sde_debugfs init failed: %d\n", rc);
2027
Dhaval Patel91399a52017-11-27 22:21:27 -08002028 drm_for_each_crtc(crtc, dev)
2029 sde_crtc_post_init(dev, crtc);
2030
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07002031 return rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04002032}
2033
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002034static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002035 struct drm_encoder *encoder)
2036{
2037 return rate;
2038}
2039
Clarence Ip17162b52016-11-24 17:06:29 -05002040static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
2041 struct platform_device *pdev)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002042{
Clarence Ip17162b52016-11-24 17:06:29 -05002043 struct drm_device *dev;
2044 struct msm_drm_private *priv;
Alan Kwong5d324e42016-07-28 22:56:18 -04002045 int i;
2046
Clarence Ip17162b52016-11-24 17:06:29 -05002047 if (!sde_kms || !pdev)
2048 return;
2049
2050 dev = sde_kms->dev;
2051 if (!dev)
2052 return;
2053
2054 priv = dev->dev_private;
2055 if (!priv)
2056 return;
2057
Alan Kwong23afc2d92017-09-15 10:59:06 -04002058 if (sde_kms->genpd_init) {
2059 sde_kms->genpd_init = false;
2060 pm_genpd_remove(&sde_kms->genpd);
2061 of_genpd_del_provider(pdev->dev.of_node);
2062 }
2063
Clarence Ip17162b52016-11-24 17:06:29 -05002064 if (sde_kms->hw_intr)
2065 sde_hw_intr_destroy(sde_kms->hw_intr);
2066 sde_kms->hw_intr = NULL;
2067
Clarence Ip7f0de632017-05-31 14:59:14 -04002068 if (sde_kms->power_event)
2069 sde_power_handle_unregister_event(
2070 &priv->phandle, sde_kms->power_event);
2071
Clarence Ip17162b52016-11-24 17:06:29 -05002072 _sde_kms_release_displays(sde_kms);
Shashank Babu Chinta Venkataacb1bc92017-11-06 11:55:49 -08002073 (void)_sde_kms_release_splash_buffer(
2074 sde_kms->splash_data.splash_base,
2075 sde_kms->splash_data.splash_size);
Clarence Ip17162b52016-11-24 17:06:29 -05002076
2077 /* safe to call these more than once during shutdown */
2078 _sde_debugfs_destroy(sde_kms);
2079 _sde_kms_mmu_destroy(sde_kms);
2080
Alan Kwong54125bb2017-02-26 16:01:36 -08002081 if (sde_kms->iclient) {
2082 ion_client_destroy(sde_kms->iclient);
2083 sde_kms->iclient = NULL;
2084 }
2085
Lloyd Atkinson79f08802017-01-09 17:37:18 -05002086 if (sde_kms->catalog) {
2087 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
2088 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
Alan Kwong5d324e42016-07-28 22:56:18 -04002089
Lloyd Atkinson79f08802017-01-09 17:37:18 -05002090 if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
2091 sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
2092 }
Alan Kwong5d324e42016-07-28 22:56:18 -04002093 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002094
Clarence Ip17162b52016-11-24 17:06:29 -05002095 if (sde_kms->rm_init)
2096 sde_rm_destroy(&sde_kms->rm);
2097 sde_kms->rm_init = false;
2098
2099 if (sde_kms->catalog)
2100 sde_hw_catalog_deinit(sde_kms->catalog);
2101 sde_kms->catalog = NULL;
2102
2103 if (sde_kms->core_client)
2104 sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
2105 sde_kms->core_client = NULL;
2106
2107 if (sde_kms->vbif[VBIF_NRT])
2108 msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
2109 sde_kms->vbif[VBIF_NRT] = NULL;
2110
2111 if (sde_kms->vbif[VBIF_RT])
2112 msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
2113 sde_kms->vbif[VBIF_RT] = NULL;
2114
2115 if (sde_kms->mmio)
2116 msm_iounmap(pdev, sde_kms->mmio);
2117 sde_kms->mmio = NULL;
Gopikrishnaiah Anandandb90fa12017-05-09 17:56:08 -07002118
2119 sde_reg_dma_deinit();
Clarence Ip17162b52016-11-24 17:06:29 -05002120}
2121
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07002122int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
2123{
2124 int i;
2125
2126 if (!sde_kms)
2127 return -EINVAL;
2128
2129 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
2130 struct msm_mmu *mmu;
2131 struct msm_gem_address_space *aspace = sde_kms->aspace[i];
2132
2133 if (!aspace)
2134 continue;
2135
2136 mmu = sde_kms->aspace[i]->mmu;
2137
2138 if (secure_only &&
2139 !aspace->mmu->funcs->is_domain_secure(mmu))
2140 continue;
2141
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07002142 /* cleanup aspace before detaching */
2143 msm_gem_aspace_domain_attach_detach_update(aspace, true);
2144
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07002145 SDE_DEBUG("Detaching domain:%d\n", i);
2146 aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
2147 ARRAY_SIZE(iommu_ports));
2148
2149 aspace->domain_attached = false;
2150 }
2151
2152 return 0;
2153}
2154
2155int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
2156{
2157 int i;
2158
2159 if (!sde_kms)
2160 return -EINVAL;
2161
2162 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
2163 struct msm_mmu *mmu;
2164 struct msm_gem_address_space *aspace = sde_kms->aspace[i];
2165
2166 if (!aspace)
2167 continue;
2168
2169 mmu = sde_kms->aspace[i]->mmu;
2170
2171 if (secure_only &&
2172 !aspace->mmu->funcs->is_domain_secure(mmu))
2173 continue;
2174
2175 SDE_DEBUG("Attaching domain:%d\n", i);
2176 aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
2177 ARRAY_SIZE(iommu_ports));
2178
2179 aspace->domain_attached = true;
Veera Sundaram Sankaranb024ae42018-05-24 10:05:54 -07002180 msm_gem_aspace_domain_attach_detach_update(aspace, false);
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07002181 }
2182
2183 return 0;
2184}
2185
Clarence Ip17162b52016-11-24 17:06:29 -05002186static void sde_kms_destroy(struct msm_kms *kms)
2187{
2188 struct sde_kms *sde_kms;
2189 struct drm_device *dev;
2190
2191 if (!kms) {
2192 SDE_ERROR("invalid kms\n");
2193 return;
2194 }
2195
2196 sde_kms = to_sde_kms(kms);
2197 dev = sde_kms->dev;
2198 if (!dev) {
2199 SDE_ERROR("invalid device\n");
2200 return;
2201 }
2202
2203 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002204 kfree(sde_kms);
2205}
2206
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07002207static void _sde_kms_plane_force_remove(struct drm_plane *plane,
2208 struct drm_atomic_state *state)
2209{
2210 struct drm_plane_state *plane_state;
2211 int ret = 0;
2212
2213 if (!plane->crtc)
2214 return;
2215
2216 plane_state = drm_atomic_get_plane_state(state, plane);
2217 if (IS_ERR(plane_state)) {
2218 ret = PTR_ERR(plane_state);
2219 SDE_ERROR("error %d getting plane %d state\n",
2220 ret, plane->base.id);
2221 return;
2222 }
2223
2224 plane->old_fb = plane->fb;
2225
2226 SDE_DEBUG("disabling plane %d\n", plane->base.id);
2227
2228 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2229 if (ret != 0)
2230 SDE_ERROR("error %d disabling plane %d\n", ret,
2231 plane->base.id);
2232}
2233
/*
 * _sde_kms_remove_fbs - release a closing file's framebuffers
 * @sde_kms: sde kms handle
 * @file: drm file being closed
 * @state: pre-allocated atomic state; ownership passes to this function
 *         (freed here when no commit is needed, or by the framework on
 *         successful commit)
 *
 * Framebuffers still referenced elsewhere (refcount > 1, i.e. still on
 * a plane) are moved to a local list, their planes are force-disabled,
 * and a commit is issued before the references are dropped. Unreferenced
 * fbs are released immediately.
 *
 * Return: 0 on success or when nothing needed committing, otherwise the
 * drm_atomic_commit() error (-EDEADLK triggers the caller's retry loop).
 */
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	int ret = 0;
	u32 plane_mask = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			/* fb still in use: park it and disable its planes */
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->fb == fb) {
					plane_mask |=
						1 << drm_plane_index(plane);
					_sde_kms_plane_force_remove(
								plane, state);
				}
			}
		} else {
			/* only the file holds this fb: drop it now */
			list_del_init(&fb->filp_head);
			drm_framebuffer_unreference(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		drm_atomic_state_free(state);
		return 0;
	}

	SDE_DEBUG("committing after removing all the pipes\n");
	ret = drm_atomic_commit(state);

	if (ret) {
		/*
		 * move the fbs back to original list, so it would be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	/* commit succeeded: release the parked framebuffers */
	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_unreference(fb);
	}

end:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	return ret;
}
2297
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002298static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
2299{
2300 struct sde_kms *sde_kms = to_sde_kms(kms);
2301 struct drm_device *dev = sde_kms->dev;
2302 struct msm_drm_private *priv = dev->dev_private;
2303 unsigned int i;
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07002304 struct drm_atomic_state *state = NULL;
2305 int ret = 0;
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002306
Dhaval Patelc68896d2018-06-13 13:55:46 -07002307 /* cancel pending flip event */
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002308 for (i = 0; i < priv->num_crtcs; i++)
Dhaval Patelc68896d2018-06-13 13:55:46 -07002309 sde_crtc_complete_flip(priv->crtcs[i], file);
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07002310
2311 drm_modeset_lock_all(dev);
2312 state = drm_atomic_state_alloc(dev);
2313 if (!state) {
2314 ret = -ENOMEM;
2315 goto end;
2316 }
2317
2318 state->acquire_ctx = dev->mode_config.acquire_ctx;
2319
2320 for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
2321 ret = _sde_kms_remove_fbs(sde_kms, file, state);
2322 if (ret != -EDEADLK)
2323 break;
2324 drm_atomic_state_clear(state);
2325 drm_atomic_legacy_backoff(state);
2326 }
2327
2328end:
2329 if ((ret != 0) && state)
2330 drm_atomic_state_free(state);
2331
2332 SDE_DEBUG("sde preclose done, ret:%d\n", ret);
2333 drm_modeset_unlock_all(dev);
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002334}
2335
Lloyd Atkinsone08229c2017-10-02 17:53:30 -04002336static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
2337 struct drm_atomic_state *state)
2338{
2339 struct drm_device *dev = sde_kms->dev;
2340 struct drm_plane *plane;
2341 struct drm_plane_state *plane_state;
2342 struct drm_crtc *crtc;
2343 struct drm_crtc_state *crtc_state;
2344 struct drm_connector *conn;
2345 struct drm_connector_state *conn_state;
2346 int ret = 0;
2347
2348 drm_for_each_plane(plane, dev) {
2349 plane_state = drm_atomic_get_plane_state(state, plane);
2350 if (IS_ERR(plane_state)) {
2351 ret = PTR_ERR(plane_state);
2352 SDE_ERROR("error %d getting plane %d state\n",
2353 ret, DRMID(plane));
2354 return ret;
2355 }
2356
2357 ret = sde_plane_helper_reset_custom_properties(plane,
2358 plane_state);
2359 if (ret) {
2360 SDE_ERROR("error %d resetting plane props %d\n",
2361 ret, DRMID(plane));
2362 return ret;
2363 }
2364 }
2365 drm_for_each_crtc(crtc, dev) {
2366 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2367 if (IS_ERR(crtc_state)) {
2368 ret = PTR_ERR(crtc_state);
2369 SDE_ERROR("error %d getting crtc %d state\n",
2370 ret, DRMID(crtc));
2371 return ret;
2372 }
2373
2374 ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
2375 if (ret) {
2376 SDE_ERROR("error %d resetting crtc props %d\n",
2377 ret, DRMID(crtc));
2378 return ret;
2379 }
2380 }
2381
2382 drm_for_each_connector(conn, dev) {
2383 conn_state = drm_atomic_get_connector_state(state, conn);
2384 if (IS_ERR(conn_state)) {
2385 ret = PTR_ERR(conn_state);
2386 SDE_ERROR("error %d getting connector %d state\n",
2387 ret, DRMID(conn));
2388 return ret;
2389 }
2390
2391 ret = sde_connector_helper_reset_custom_properties(conn,
2392 conn_state);
2393 if (ret) {
2394 SDE_ERROR("error %d resetting connector props %d\n",
2395 ret, DRMID(conn));
2396 return ret;
2397 }
2398 }
2399
2400 return ret;
2401}
2402
/*
 * sde_kms_lastclose - kms hook run when the last drm file closes
 * @kms: msm kms handle
 *
 * Commits an atomic state that resets all driver-private properties to
 * their defaults, retrying on -EDEADLK up to
 * TEARDOWN_DEADLOCK_RETRY_MAX times with a legacy backoff between
 * attempts. On successful commit, ownership of the state transfers to
 * the framework; on any failure it is freed here.
 */
static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	int ret, i;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		/* add reset of custom properties to the state */
		ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
		if (ret)
			break;

		ret = drm_atomic_commit(state);
		if (ret != -EDEADLK)
			break;

		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
		SDE_DEBUG("deadlock backoff on attempt %d\n", i);
	}

	if (ret) {
		/**
		 * on success, atomic state object ownership transfers to
		 * framework, otherwise, free it here
		 */
		drm_atomic_state_free(state);
		SDE_ERROR("failed to run last close: %d\n", ret);
	}
}
2448
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002449static int sde_kms_check_secure_transition(struct msm_kms *kms,
2450 struct drm_atomic_state *state)
2451{
2452 struct sde_kms *sde_kms;
2453 struct drm_device *dev;
2454 struct drm_crtc *crtc;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002455 struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002456 struct drm_crtc_state *crtc_state;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002457 int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
2458 bool sec_session = false, global_sec_session = false;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002459 int i;
2460
2461 if (!kms || !state) {
2462 return -EINVAL;
2463 SDE_ERROR("invalid arguments\n");
2464 }
2465
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002466 sde_kms = to_sde_kms(kms);
2467 dev = sde_kms->dev;
2468
2469 /* iterate state object for active secure/non-secure crtc */
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002470 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2471 if (!crtc_state->active)
2472 continue;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002473
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002474 active_crtc_cnt++;
2475 if (sde_crtc_get_secure_level(crtc, crtc_state) ==
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002476 SDE_DRM_SEC_ONLY)
2477 sec_session = true;
2478
2479 cur_crtc = crtc;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002480 }
2481
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002482 /* iterate global list for active and secure crtc */
2483 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002484 if (!crtc->state->active)
2485 continue;
2486
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002487 global_active_crtc_cnt++;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002488 if (sde_crtc_get_secure_level(crtc, crtc->state) ==
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002489 SDE_DRM_SEC_ONLY)
2490 global_sec_session = true;
2491
2492 global_crtc = crtc;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002493 }
2494
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002495 /*
2496 * - fail secure crtc commit, if any other crtc session is already
2497 * in progress
2498 * - fail non-secure crtc commit, if any secure crtc session is already
2499 * in progress
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002500 */
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002501 if (global_sec_session || sec_session) {
2502 if ((global_active_crtc_cnt >
2503 MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
2504 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
2505 SDE_ERROR(
2506 "Secure check failed global_active:%d active:%d\n",
2507 global_active_crtc_cnt, active_crtc_cnt);
2508 return -EPERM;
2509
2510 /*
2511 * As only one crtc is allowed during secure session, the crtc
2512 * in this commit should match with the global crtc, if it
2513 * exists
2514 */
2515 } else if (global_crtc && (global_crtc != cur_crtc)) {
2516 SDE_ERROR(
2517 "crtc%d-sec%d not allowed during crtc%d-sec%d\n",
Veera Sundaram Sankaran4db71f22017-11-16 14:33:10 -08002518 cur_crtc ? cur_crtc->base.id : -1, sec_session,
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002519 global_crtc->base.id, global_sec_session);
2520 return -EPERM;
2521 }
2522
2523 }
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002524
2525 return 0;
2526}
2527
2528static int sde_kms_atomic_check(struct msm_kms *kms,
2529 struct drm_atomic_state *state)
2530{
2531 struct sde_kms *sde_kms;
2532 struct drm_device *dev;
2533 int ret;
2534
2535 if (!kms || !state)
2536 return -EINVAL;
2537
2538 sde_kms = to_sde_kms(kms);
2539 dev = sde_kms->dev;
2540
Clarence Ipd86f6e42017-08-08 18:31:00 -04002541 if (sde_kms_is_suspend_blocked(dev)) {
2542 SDE_DEBUG("suspended, skip atomic_check\n");
2543 return -EBUSY;
2544 }
2545
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002546 ret = drm_atomic_helper_check(dev, state);
2547 if (ret)
2548 return ret;
2549 /*
2550 * Check if any secure transition(moving CRTC between secure and
2551 * non-secure state and vice-versa) is allowed or not. when moving
2552 * to secure state, planes with fb_mode set to dir_translated only can
2553 * be staged on the CRTC, and only one CRTC can be active during
2554 * Secure state
2555 */
2556 return sde_kms_check_secure_transition(kms, state);
2557}
2558
Jordan Croused8e96522017-02-13 10:14:16 -07002559static struct msm_gem_address_space*
2560_sde_kms_get_address_space(struct msm_kms *kms,
2561 unsigned int domain)
2562{
2563 struct sde_kms *sde_kms;
2564
2565 if (!kms) {
2566 SDE_ERROR("invalid kms\n");
2567 return NULL;
2568 }
2569
2570 sde_kms = to_sde_kms(kms);
2571 if (!sde_kms) {
2572 SDE_ERROR("invalid sde_kms\n");
2573 return NULL;
2574 }
2575
2576 if (domain >= MSM_SMMU_DOMAIN_MAX)
2577 return NULL;
2578
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07002579 return (sde_kms->aspace[domain] &&
2580 sde_kms->aspace[domain]->domain_attached) ?
2581 sde_kms->aspace[domain] : NULL;
Jordan Croused8e96522017-02-13 10:14:16 -07002582}
2583
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002584static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
2585{
2586 struct drm_device *dev = NULL;
2587 struct sde_kms *sde_kms = NULL;
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302588 struct drm_connector *connector = NULL;
2589 struct sde_connector *sde_conn = NULL;
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002590
2591 if (!kms) {
2592 SDE_ERROR("invalid kms\n");
2593 return;
2594 }
2595
2596 sde_kms = to_sde_kms(kms);
2597 dev = sde_kms->dev;
2598
2599 if (!dev) {
2600 SDE_ERROR("invalid device\n");
2601 return;
2602 }
2603
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302604 if (!dev->mode_config.poll_enabled)
2605 return;
2606
2607 mutex_lock(&dev->mode_config.mutex);
2608 drm_for_each_connector(connector, dev) {
2609 /* Only handle HPD capable connectors. */
2610 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
2611 continue;
2612
2613 sde_conn = to_sde_connector(connector);
2614
Ajay Singh Parmar315e5852017-11-23 21:47:32 -08002615 if (sde_conn->ops.post_open)
2616 sde_conn->ops.post_open(sde_conn->display);
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302617 }
2618 mutex_unlock(&dev->mode_config.mutex);
2619
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002620}
2621
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002622static int sde_kms_cont_splash_config(struct msm_kms *kms)
2623{
2624 void *display;
2625 struct dsi_display *dsi_display;
2626 struct msm_display_info info;
2627 struct drm_encoder *encoder = NULL;
2628 struct drm_crtc *crtc = NULL;
2629 int i, rc = 0;
2630 struct drm_display_mode *drm_mode = NULL;
2631 struct drm_device *dev;
2632 struct msm_drm_private *priv;
2633 struct sde_kms *sde_kms;
Jeykumar Sankaran74d7c382017-11-10 17:20:17 -08002634 struct list_head *connector_list = NULL;
2635 struct drm_connector *conn_iter = NULL;
2636 struct drm_connector *connector = NULL;
Sandeep Panda8693e8f2018-03-08 08:16:44 +05302637 struct sde_connector *sde_conn = NULL;
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002638
2639 if (!kms) {
2640 SDE_ERROR("invalid kms\n");
2641 return -EINVAL;
2642 }
2643
2644 sde_kms = to_sde_kms(kms);
2645 dev = sde_kms->dev;
2646 if (!dev || !dev->platformdev) {
2647 SDE_ERROR("invalid device\n");
2648 return -EINVAL;
2649 }
2650
Chandan Uddaraju9efbbe32017-11-09 23:57:05 -08002651 if (!sde_kms->splash_data.cont_splash_en) {
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002652 DRM_INFO("cont_splash feature not enabled\n");
2653 return rc;
2654 }
2655
2656 /* Currently, we only support one dsi display configuration */
2657 /* dsi */
2658 for (i = 0; i < sde_kms->dsi_display_count; ++i) {
2659 display = sde_kms->dsi_displays[i];
2660 dsi_display = (struct dsi_display *)display;
2661 SDE_DEBUG("display->name = %s\n", dsi_display->name);
2662
2663 if (dsi_display->bridge->base.encoder) {
2664 encoder = dsi_display->bridge->base.encoder;
2665 SDE_DEBUG("encoder name = %s\n", encoder->name);
2666 }
2667 memset(&info, 0x0, sizeof(info));
2668 rc = dsi_display_get_info(&info, display);
2669 if (rc) {
2670 SDE_ERROR("dsi get_info %d failed\n", i);
2671 encoder = NULL;
2672 continue;
2673 }
2674 SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n",
2675 ((info.is_connected) ? "true" : "false"),
2676 ((info.is_primary) ? "true" : "false"));
Jayant Shekhar3b6b3262018-08-06 18:36:02 +05302677
2678 /**
2679 * Since we are supporting one DSI for splash, use the display
2680 * which is marked as primary.
2681 */
2682 if (!info.is_primary)
2683 continue;
2684 else
2685 break;
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002686 }
2687
2688 if (!encoder) {
2689 SDE_ERROR("encoder not initialized\n");
2690 return -EINVAL;
2691 }
2692
2693 priv = sde_kms->dev->dev_private;
2694 encoder->crtc = priv->crtcs[0];
2695 crtc = encoder->crtc;
2696 SDE_DEBUG("crtc id = %d\n", crtc->base.id);
2697
Jeykumar Sankaran74d7c382017-11-10 17:20:17 -08002698
2699 mutex_lock(&dev->mode_config.mutex);
2700 connector_list = &dev->mode_config.connector_list;
Shubhashree Dharcc9091d2018-05-10 15:56:42 +05302701 if (connector_list) {
2702 list_for_each_entry(conn_iter, connector_list, head) {
2703 /**
2704 * SDE_KMS doesn't attach more than one encoder to
2705 * a DSI connector. So it is safe to check only with
2706 * the first encoder entry. Revisit this logic if we
2707 * ever have to support continuous splash for
2708 * external displays in MST configuration.
2709 */
2710 if (conn_iter &&
2711 (conn_iter->encoder_ids[0] == encoder->base.id)) {
2712 connector = conn_iter;
2713 break;
2714 }
Jeykumar Sankaran74d7c382017-11-10 17:20:17 -08002715 }
2716 }
Jeykumar Sankaran74d7c382017-11-10 17:20:17 -08002717 if (!connector) {
2718 SDE_ERROR("connector not initialized\n");
2719 mutex_unlock(&dev->mode_config.mutex);
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002720 return -EINVAL;
2721 }
Jeykumar Sankaran74d7c382017-11-10 17:20:17 -08002722
2723 if (connector->funcs->fill_modes) {
2724 connector->funcs->fill_modes(connector,
2725 dev->mode_config.max_width,
2726 dev->mode_config.max_height);
2727 } else {
2728 SDE_ERROR("fill_modes api not defined\n");
2729 mutex_unlock(&dev->mode_config.mutex);
2730 return -EINVAL;
2731 }
2732 mutex_unlock(&dev->mode_config.mutex);
2733
2734 crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
2735
2736 /* currently consider modes[0] as the preferred mode */
2737 drm_mode = list_first_entry(&connector->modes,
2738 struct drm_display_mode, head);
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002739 SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
2740 drm_mode->name, drm_mode->base.id,
2741 drm_mode->type, drm_mode->flags);
2742
2743 /* Update CRTC drm structure */
2744 crtc->state->active = true;
2745 rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
2746 if (rc) {
2747 SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
2748 return rc;
2749 }
2750 drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
2751 drm_mode_copy(&crtc->mode, drm_mode);
2752
2753 /* Update encoder structure */
2754 sde_encoder_update_caps_for_cont_splash(encoder);
2755
2756 sde_crtc_update_cont_splash_mixer_settings(crtc);
2757
Sandeep Panda8693e8f2018-03-08 08:16:44 +05302758 sde_conn = to_sde_connector(connector);
2759 if (sde_conn && sde_conn->ops.cont_splash_config)
2760 sde_conn->ops.cont_splash_config(sde_conn->display);
2761
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002762 return rc;
2763}
2764
Kalyan Thota2f0444a2018-04-20 17:50:33 +05302765static bool sde_kms_check_for_splash(struct msm_kms *kms)
2766{
2767 struct sde_kms *sde_kms;
2768
2769 if (!kms) {
2770 SDE_ERROR("invalid kms\n");
2771 return false;
2772 }
2773
2774 sde_kms = to_sde_kms(kms);
2775 return sde_kms->splash_data.cont_splash_en;
2776}
2777
Prashant Singhaf73d452018-11-12 10:52:34 -08002778static void _sde_kms_null_commit(struct drm_device *dev,
2779 struct drm_encoder *enc)
2780{
2781 struct drm_modeset_acquire_ctx ctx;
2782 struct drm_connector *conn = NULL;
2783 struct drm_connector *tmp_conn = NULL;
2784 struct drm_atomic_state *state = NULL;
2785 struct drm_crtc_state *crtc_state = NULL;
2786 struct drm_connector_state *conn_state = NULL;
2787 int retry_cnt = 0;
2788 int ret = 0;
2789
2790 drm_modeset_acquire_init(&ctx, 0);
2791
2792retry:
2793 ret = drm_modeset_lock_all_ctx(dev, &ctx);
2794 if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
2795 drm_modeset_backoff(&ctx);
2796 retry_cnt++;
2797 udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
2798 goto retry;
2799 } else if (WARN_ON(ret)) {
2800 goto end;
2801 }
2802
2803 state = drm_atomic_state_alloc(dev);
2804 if (!state) {
2805 DRM_ERROR("failed to allocate atomic state, %d\n", ret);
2806 goto end;
2807 }
2808
2809 state->acquire_ctx = &ctx;
2810 drm_for_each_connector(tmp_conn, dev) {
2811 if (enc == tmp_conn->state->best_encoder) {
2812 conn = tmp_conn;
2813 break;
2814 }
2815 }
2816
2817 if (!conn) {
2818 SDE_ERROR("error in finding conn for enc:%d\n", DRMID(enc));
2819 goto end;
2820 }
2821
2822 crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
2823 conn_state = drm_atomic_get_connector_state(state, conn);
2824 if (IS_ERR(conn_state)) {
2825 SDE_ERROR("error %d getting connector %d state\n",
2826 ret, DRMID(conn));
2827 goto end;
2828 }
2829
2830 crtc_state->active = true;
2831 ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
2832
2833 ret = drm_atomic_commit(state);
2834 if (ret)
2835 SDE_ERROR("Commit failed with %d error\n", ret);
2836end:
2837 if (state)
2838 drm_atomic_state_free(state);
2839
2840 drm_modeset_drop_locks(&ctx);
2841 drm_modeset_acquire_fini(&ctx);
2842}
2843
/*
 * sde_kms_pm_suspend - PM suspend hook for the SDE KMS.
 *
 * Sequence:
 *  1. stop hot-plug polling,
 *  2. null-commit any encoder still in continuous-splash handoff,
 *  3. snapshot the current atomic state into sde_kms->suspend_state
 *     (restored by sde_kms_pm_resume),
 *  4. build and commit a state that disables all active CRTCs
 *     (connectors in LP1 are first transitioned to LP2; LP2 CRTCs are
 *     kept active but counted),
 *  5. wait for TX completion on LP2 encoders and request idle.
 *
 * NOTE(review): this function always returns 0, even when backing up or
 * committing state fails — callers apparently treat suspend as
 * best-effort. Confirm before relying on the return value.
 */
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_atomic_state *state;
	struct drm_encoder *enc;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* if a display stuck in CS trigger a null commit to complete handoff */
	drm_for_each_encoder(enc, ddev) {
		/* sde_kms is non-NULL here; the extra check is defensive */
		if (sde_kms && sde_kms->splash_data.cont_splash_en && enc->crtc)
			_sde_kms_null_commit(ddev, enc);
	}

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	/* on -EDEADLK the unlock path backs off and jumps back here */
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_free(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		DRM_ERROR("failed to back up suspend state\n");
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (IS_ERR_OR_NULL(state)) {
		DRM_ERROR("failed to allocate crtc disable state\n");
		goto unlock;
	}

	state->acquire_ctx = &ctx;
	drm_for_each_connector(conn, ddev) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		/* only connectors that are bound and powered on matter */
		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON)
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_atomic_state_free(state);
				goto unlock;
			}
		}

		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_atomic_state_free(state);
				goto unlock;
			}

			/* LP1 CRTCs stay active: they were just moved to LP2 above */
			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		drm_atomic_state_free(state);
		sde_kms->suspend_block = true;
		goto unlock;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		drm_atomic_state_free(state);
		goto unlock;
	}

	/* block further commits until resume */
	sde_kms->suspend_block = true;

	/* for LP2 connectors, wait for the frame transfer to finish */
	drm_for_each_connector(conn, ddev) {
		uint64_t lp;

		lp = sde_connector_get_lp(conn);
		if (lp != SDE_MODE_DPMS_LP2)
			continue;

		ret = sde_encoder_wait_for_event(conn->encoder,
				MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK)
			SDE_ERROR(
				"[enc: %d] wait for commit done returned %d\n",
				conn->encoder->base.id, ret);
		else if (!ret)
			sde_encoder_idle_request(conn->encoder);
	}
unlock:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return 0;
}
2981
/*
 * sde_kms_pm_resume - PM resume hook for the SDE KMS.
 *
 * Resets the mode config, clears the suspend commit block, and replays
 * the atomic state captured by sde_kms_pm_suspend (if any), then
 * re-enables hot-plug polling.
 *
 * NOTE(review): always returns 0; a failed state restore is only
 * logged, and the saved state is dropped either way.
 */
static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	int ret;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	/* log whether a suspend snapshot exists to replay */
	SDE_EVT32(sde_kms->suspend_state != NULL);

	drm_mode_config_reset(ddev);

	drm_modeset_lock_all(ddev);

	/* allow commits again before replaying the saved state */
	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx =
			ddev->mode_config.acquire_ctx;
		ret = drm_atomic_commit(sde_kms->suspend_state);
		if (ret < 0) {
			DRM_ERROR("failed to restore state, %d\n", ret);
			/* commit consumes the state only on success */
			drm_atomic_state_free(sde_kms->suspend_state);
		}
		sde_kms->suspend_state = NULL;
	}
	drm_modeset_unlock_all(ddev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
3022
/*
 * KMS operation table registered with the msm_drv core: wires the SDE
 * implementations of init/teardown, IRQ management, atomic commit
 * phases, vblank control, format queries, PM suspend/resume, and
 * continuous-splash hooks into the generic msm_kms interface.
 */
static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.lastclose = sde_kms_lastclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.enable_vblank = sde_kms_enable_vblank,
	.disable_vblank = sde_kms_disable_vblank,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.postopen = _sde_kms_post_open,
	.check_for_splash = sde_kms_check_for_splash,
};
3053
Dhaval Patel3949f032016-06-20 16:24:33 -07003054/* the caller api needs to turn on clock before calling it */
Clarence Ip17162b52016-11-24 17:06:29 -05003055static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003056{
Dhaval Patel88739332017-04-11 11:08:04 -07003057 sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003058}
3059
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003060static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
3061{
3062 struct msm_mmu *mmu;
3063 int i;
3064
Jordan Croused8e96522017-02-13 10:14:16 -07003065 for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
3066 if (!sde_kms->aspace[i])
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003067 continue;
3068
Jordan Croused8e96522017-02-13 10:14:16 -07003069 mmu = sde_kms->aspace[i]->mmu;
3070
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003071 mmu->funcs->detach(mmu, (const char **)iommu_ports,
3072 ARRAY_SIZE(iommu_ports));
Jordan Crouse12bf3622017-02-13 10:14:11 -07003073 msm_gem_address_space_destroy(sde_kms->aspace[i]);
3074
Jordan Croused8e96522017-02-13 10:14:16 -07003075 sde_kms->aspace[i] = NULL;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04003076 }
3077
3078 return 0;
3079}
3080
/*
 * _sde_kms_mmu_init - create and attach an SMMU-backed GEM address
 * space for each SDE SMMU domain.
 *
 * For the unsecure domain, DOMAIN_ATTR_EARLY_MAP is enabled before
 * attach so the hardware can keep scanning out the bootloader's splash
 * buffer via a one-to-one mapping; the splash region is then mapped and
 * early-map is switched back off for subsequent domains.
 *
 * Returns 0 on success or a negative errno; on failure all previously
 * created address spaces are torn down via _sde_kms_mmu_destroy().
 *
 * NOTE(review): on attach failure, sde_kms->aspace[i] has already been
 * set and the aspace is destroyed locally before the fail path also
 * runs _sde_kms_mmu_destroy() over the array — verify the array slot
 * cannot be visited twice. Also, 'fail' calls mmu->funcs->destroy(mmu)
 * even after msm_gem_address_space_destroy(aspace); confirm the aspace
 * destroy does not already release the mmu.
 */
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;
	int early_map = 1;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			/* a missing domain is tolerated, not fatal */
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		/*
		 * Before attaching SMMU, we need to honor continuous splash
		 * use case where hardware tries to fetch buffer from physical
		 * address. To facilitate this requirement we need to have a
		 * one to one mapping on SMMU until we have our first frame.
		 */
		if (i == MSM_SMMU_DOMAIN_UNSECURE) {
			ret = mmu->funcs->set_attribute(mmu,
					DOMAIN_ATTR_EARLY_MAP,
					&early_map);
			if (ret) {
				SDE_ERROR("failed to set map att: %d\n", ret);
				goto fail;
			}
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
				mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;

		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
			msm_gem_address_space_destroy(aspace);
			goto fail;
		}
		aspace->domain_attached = true;
		/* first successful attach disables early map for the rest */
		early_map = 0;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
				sde_kms->splash_data.splash_base) {
			ret = _sde_kms_splash_smmu_map(sde_kms->dev, mmu,
					&sde_kms->splash_data);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto fail;
			}
		}

		/*
		 * Turning off early map after generating one to one
		 * mapping for splash address space.
		 */
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				&early_map);
		if (ret) {
			SDE_ERROR("failed to set map att ret:%d\n", ret);
			goto early_map_fail;
		}
	}

	return 0;
early_map_fail:
	/* undo the one-to-one splash mapping made above */
	mmu->funcs->one_to_one_unmap(mmu, sde_kms->splash_data.splash_base,
			sde_kms->splash_data.splash_size);
fail:
	mmu->funcs->destroy(mmu);
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}
3167
Clarence Ip7f0de632017-05-31 14:59:14 -04003168static void sde_kms_handle_power_event(u32 event_type, void *usr)
3169{
3170 struct sde_kms *sde_kms = usr;
Harsh Sahu08a4a742017-09-18 11:42:39 -07003171 struct msm_kms *msm_kms;
Clarence Ip7f0de632017-05-31 14:59:14 -04003172
Harsh Sahu08a4a742017-09-18 11:42:39 -07003173 msm_kms = &sde_kms->base;
Clarence Ip7f0de632017-05-31 14:59:14 -04003174 if (!sde_kms)
3175 return;
3176
Harsh Sahu08a4a742017-09-18 11:42:39 -07003177 SDE_DEBUG("event_type:%d\n", event_type);
3178 SDE_EVT32_VERBOSE(event_type);
3179
3180 if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
3181 sde_irq_update(msm_kms, true);
Clarence Ip7f0de632017-05-31 14:59:14 -04003182 sde_vbif_init_memtypes(sde_kms);
Dhaval Patel30874eb2018-05-31 13:33:31 -07003183 sde_kms->first_kickoff = true;
Harsh Sahu08a4a742017-09-18 11:42:39 -07003184 } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
3185 sde_irq_update(msm_kms, false);
Dhaval Patel30874eb2018-05-31 13:33:31 -07003186 sde_kms->first_kickoff = false;
Harsh Sahu08a4a742017-09-18 11:42:39 -07003187 }
Clarence Ip7f0de632017-05-31 14:59:14 -04003188}
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04003189
Alan Kwong23afc2d92017-09-15 10:59:06 -04003190#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)
3191
3192static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
3193{
3194 struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
3195 struct drm_device *dev;
3196 struct msm_drm_private *priv;
3197 int rc;
3198
3199 SDE_DEBUG("\n");
3200
3201 dev = sde_kms->dev;
3202 if (!dev)
3203 return -EINVAL;
3204
3205 priv = dev->dev_private;
3206 if (!priv)
3207 return -EINVAL;
3208
3209 SDE_EVT32(genpd->device_count);
3210
3211 rc = sde_power_resource_enable(&priv->phandle, priv->pclient, true);
3212
3213 return rc;
3214}
3215
3216static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
3217{
3218 struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
3219 struct drm_device *dev;
3220 struct msm_drm_private *priv;
3221 int rc;
3222
3223 SDE_DEBUG("\n");
3224
3225 dev = sde_kms->dev;
3226 if (!dev)
3227 return -EINVAL;
3228
3229 priv = dev->dev_private;
3230 if (!priv)
3231 return -EINVAL;
3232
3233 SDE_EVT32(genpd->device_count);
3234
3235 rc = sde_power_resource_enable(&priv->phandle, priv->pclient, false);
3236
3237 return rc;
3238}
3239
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003240static int _sde_kms_get_splash_data(struct sde_splash_data *data)
3241{
3242 int ret = 0;
3243 struct device_node *parent, *node;
3244 struct resource r;
3245
3246 if (!data)
3247 return -EINVAL;
3248
3249 parent = of_find_node_by_path("/reserved-memory");
3250 if (!parent) {
3251 SDE_ERROR("failed to find reserved-memory node\n");
3252 return -EINVAL;
3253 }
3254
3255 node = of_find_node_by_name(parent, "cont_splash_region");
3256 if (!node) {
3257 SDE_ERROR("failed to find splash memory reservation\n");
3258 return -EINVAL;
3259 }
3260
3261 if (of_address_to_resource(node, 0, &r)) {
3262 SDE_ERROR("failed to find data for splash memory\n");
3263 return -EINVAL;
3264 }
3265
3266 data->splash_base = (unsigned long)r.start;
3267 data->splash_size = (r.end - r.start) + 1;
3268
3269 pr_info("found continuous splash base address:%lx size:%x\n",
3270 data->splash_base,
3271 data->splash_size);
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07003272 return ret;
3273}
3274
/*
 * sde_kms_hw_init - one-time hardware bring-up for the SDE KMS.
 *
 * Maps the MDP/VBIF/REG-DMA register regions, creates the core power
 * client, reads the splash reservation, powers on the core, then
 * initializes (in order) the hardware catalog, SMMU address spaces,
 * reg-dma, resource manager, interrupt block, continuous-splash
 * resources, MDP/VBIF hardware blocks, ION client, perf module and the
 * DRM objects (CRTCs/planes/encoders/connectors). Finally it registers
 * power-event callbacks, optionally publishes a genpd provider, and —
 * unless continuous splash is active — votes the bus back to normal
 * quotas and drops the bring-up power vote.
 *
 * Returns 0 on success or a negative errno; the goto labels at the end
 * unwind in reverse init order (note: labels before 'power_error' fall
 * through into it, so any post-power failure also drops the power vote
 * and tears down via _sde_kms_hw_destroy()).
 */
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_rm *rm = NULL;
	int i, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->platformdev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}

	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	/* map the MDP register space; fatal if absent */
	sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "mdp_phys");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}
	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
	sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");

	/* debug-bus registration failures are logged but non-fatal */
	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	/* real-time VBIF is mandatory */
	sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, "vbif_phys",
			"vbif_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(dev->platformdev,
			"vbif_phys");
	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT]);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	/* non-real-time VBIF is optional on some targets */
	sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, "vbif_nrt_phys",
			"vbif_nrt_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined");
	} else {
		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(dev->platformdev,
				"vbif_nrt_phys");
		rc = sde_dbg_reg_register_base("vbif_nrt",
				sde_kms->vbif[VBIF_NRT],
				sde_kms->vbif_len[VBIF_NRT]);
		if (rc)
			SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
					rc);
	}

	/* REG_DMA block is likewise optional */
	sde_kms->reg_dma = msm_ioremap(dev->platformdev, "regdma_phys",
			"regdma_phys");
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined");
	} else {
		sde_kms->reg_dma_len = msm_iomap_size(dev->platformdev,
				"regdma_phys");
		rc = sde_dbg_reg_register_base("reg_dma",
				sde_kms->reg_dma,
				sde_kms->reg_dma_len);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n",
					rc);
	}

	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
		rc = PTR_ERR(sde_kms->core_client);
		if (!sde_kms->core_client)
			rc = -EINVAL;
		SDE_ERROR("sde power client create failed: %d\n", rc);
		sde_kms->core_client = NULL;
		goto error;
	}

	/* splash reservation is optional; absence only logged at debug */
	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);

	/* pre-load splash-level bus votes before enabling power */
	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
		priv->phandle.data_bus_handle[i].ab_rt =
			SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA;
		priv->phandle.data_bus_handle[i].ib_rt =
			SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA;
	}

	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
		true);
	if (rc) {
		SDE_ERROR("resource enable failed: %d\n", rc);
		goto error;
	}

	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
		sde_power_data_bus_set_quota(&priv->phandle,
			sde_kms->core_client,
			SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
			SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA,
			SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA);

	/* requires the core clock enabled just above */
	_sde_kms_core_hw_rev_init(sde_kms);

	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}

	sde_kms->splash_data.resource_handoff_pending = true;

	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	/* Initialize reg dma block which is a singleton */
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("failed: reg dma init failed\n");
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	rm = &sde_kms->rm;
	rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	sde_kms->rm_init = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	/*
	 * Attempt continuous splash handoff only if reserved
	 * splash memory is found.
	 */
	if (sde_kms->splash_data.splash_base)
		sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
				&sde_kms->splash_data,
				sde_kms->catalog);

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	/* bring up one hardware VBIF object per catalog entry */
	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	/* ION client is best-effort; absence is tolerated */
	sde_kms->iclient = msm_ion_client_create(dev->unique);
	if (IS_ERR(sde_kms->iclient)) {
		rc = PTR_ERR(sde_kms->iclient);
		SDE_DEBUG("msm_ion_client not available: %d\n", rc);
		sde_kms->iclient = NULL;
	}


	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, priv->pclient, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
	dev->mode_config.max_height = sde_kms->catalog->max_display_height;

	mutex_init(&sde_kms->secure_transition_lock);
	mutex_init(&sde_kms->vblank_ctl_global_lock);

	atomic_set(&sde_kms->detach_sec_cb, 0);
	atomic_set(&sde_kms->detach_all_cb, 0);

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	/*
	 * Handle (re)initializations during power enable
	 */
	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
			SDE_POWER_EVENT_POST_ENABLE |
			SDE_POWER_EVENT_PRE_DISABLE,
			sde_kms_handle_power_event, sde_kms, "kms");

	/* initialize power domain if defined */
	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
		sde_kms->genpd.name = dev->unique;
		sde_kms->genpd.power_off = sde_kms_pd_disable;
		sde_kms->genpd.power_on = sde_kms_pd_enable;

		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
		if (rc < 0) {
			SDE_ERROR("failed to init genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			goto genpd_err;
		}

		rc = of_genpd_add_provider_simple(dev->dev->of_node,
				&sde_kms->genpd);
		if (rc < 0) {
			SDE_ERROR("failed to add genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			pm_genpd_remove(&sde_kms->genpd);
			goto genpd_err;
		}

		sde_kms->genpd_init = true;
		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
	}

	/*
	 * With continuous splash active the bring-up power vote and splash
	 * bus quotas must stay in place until handoff completes; otherwise
	 * switch to the normal "enable" quotas and drop the vote now.
	 */
	if (sde_kms->splash_data.cont_splash_en) {
		SDE_DEBUG("Skipping MDP Resources disable\n");
	} else {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle,
				sde_kms->core_client,
				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, false);
	}
	return 0;

genpd_err:
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
error:
	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
end:
	return rc;
}
3585
3586struct msm_kms *sde_kms_init(struct drm_device *dev)
3587{
3588 struct msm_drm_private *priv;
3589 struct sde_kms *sde_kms;
3590
3591 if (!dev || !dev->dev_private) {
3592 SDE_ERROR("drm device node invalid\n");
3593 return ERR_PTR(-EINVAL);
3594 }
3595
3596 priv = dev->dev_private;
3597
3598 sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
3599 if (!sde_kms) {
3600 SDE_ERROR("failed to allocate sde kms\n");
3601 return ERR_PTR(-ENOMEM);
3602 }
3603
3604 msm_kms_init(&sde_kms->base, &kms_funcs);
3605 sde_kms->dev = dev;
3606
3607 return &sde_kms->base;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07003608}
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07003609
3610static int _sde_kms_register_events(struct msm_kms *kms,
3611 struct drm_mode_object *obj, u32 event, bool en)
3612{
3613 int ret = 0;
3614 struct drm_crtc *crtc = NULL;
3615 struct drm_connector *conn = NULL;
3616 struct sde_kms *sde_kms = NULL;
3617
3618 if (!kms || !obj) {
3619 SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
3620 return -EINVAL;
3621 }
3622
3623 sde_kms = to_sde_kms(kms);
3624 switch (obj->type) {
3625 case DRM_MODE_OBJECT_CRTC:
3626 crtc = obj_to_crtc(obj);
3627 ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
3628 break;
3629 case DRM_MODE_OBJECT_CONNECTOR:
3630 conn = obj_to_connector(obj);
3631 ret = sde_connector_register_custom_event(sde_kms, conn, event,
3632 en);
3633 break;
3634 }
3635
3636 return ret;
3637}
Sandeep Panda11b20d82017-06-19 12:57:27 +05303638
3639int sde_kms_handle_recovery(struct drm_encoder *encoder)
3640{
3641 SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
3642 return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
3643}