blob: a54e39a90110bd3c9f2d32e174eebb49d6193a9e [file] [log] [blame]
Dhaval Patel14d46ce2017-01-17 16:28:12 -08001/*
2 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07005 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -08006 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07009 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -080010 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070017 */
18
Alan Kwong5d324e42016-07-28 22:56:18 -040019#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
20
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070021#include <drm/drm_crtc.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040022#include <linux/debugfs.h>
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070023#include <linux/of_address.h>
Dhaval Patel04c7e8e2016-09-26 20:14:31 -070024#include <linux/of_irq.h>
Alan Kwong4dd64c82017-02-04 18:41:51 -080025#include <linux/dma-buf.h>
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070026#include <linux/memblock.h>
27#include <linux/bootmem.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040028
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070029#include "msm_drv.h"
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -040030#include "msm_mmu.h"
Clarence Ipd02440b2017-05-21 18:10:01 -040031#include "msm_gem.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040032
33#include "dsi_display.h"
34#include "dsi_drm.h"
35#include "sde_wb.h"
Padmanabhan Komanduru63758612017-05-23 01:47:18 -070036#include "dp_display.h"
37#include "dp_drm.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040038
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070039#include "sde_kms.h"
Alan Kwongf5dd86c2016-08-09 18:08:17 -040040#include "sde_core_irq.h"
Clarence Ip4ce59322016-06-26 22:27:51 -040041#include "sde_formats.h"
Alan Kwong5d324e42016-07-28 22:56:18 -040042#include "sde_hw_vbif.h"
Alan Kwong83285fb2016-10-21 20:51:17 -040043#include "sde_vbif.h"
44#include "sde_encoder.h"
45#include "sde_plane.h"
46#include "sde_crtc.h"
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -080047#include "sde_reg_dma.h"
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070048
Alan Kwong1a00e4d2016-07-18 09:42:30 -040049#define CREATE_TRACE_POINTS
50#include "sde_trace.h"
51
/* IOMMU context-bank port names attached for display DMA (single port assumed) */
static const char * const iommu_ports[] = {
		"mdp_0",
};
55
Clarence Ip4ce59322016-06-26 22:27:51 -040056/**
57 * Controls size of event log buffer. Specified as a power of 2.
58 */
59#define SDE_EVTLOG_SIZE 1024
60
61/*
62 * To enable overall DRM driver logging
63 * # echo 0x2 > /sys/module/drm/parameters/debug
64 *
65 * To enable DRM driver h/w logging
Dhaval Patel6c666622017-03-21 23:02:59 -070066 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
Clarence Ip4ce59322016-06-26 22:27:51 -040067 *
68 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
69 */
70#define SDE_DEBUGFS_DIR "msm_sde"
71#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
72
/**
 * sdecustom - enable certain driver customizations for sde clients
 *	Enabling this modifies the standard DRM behavior slightly and assumes
 *	that the clients have specific knowledge about the modifications that
 *	are involved, so don't enable this unless you know what you're doing.
 *
 *	Parts of the driver that are affected by this setting may be located by
 *	searching for invocations of the 'sde_is_custom_client()' function.
 *
 *	NOTE(review): the initializer below enables this by default ('true'),
 *	although this knob was originally documented as disabled by default.
 *	Read-only module parameter (0400): settable only at module load time.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
87
Clarence Ip17162b52016-11-24 17:06:29 -050088static int sde_kms_hw_init(struct msm_kms *kms);
89static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -070090static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -070091static int _sde_kms_register_events(struct msm_kms *kms,
92 struct drm_mode_object *obj, u32 event, bool en);
/**
 * sde_is_custom_client - query whether sde client customizations are enabled
 *
 * Returns: value of the 'sdecustom' module parameter
 */
bool sde_is_custom_client(void)
{
	return sdecustom;
}
97
Alan Kwongf0fd8512016-10-24 21:39:26 -040098#ifdef CONFIG_DEBUG_FS
99static int _sde_danger_signal_status(struct seq_file *s,
100 bool danger_status)
101{
102 struct sde_kms *kms = (struct sde_kms *)s->private;
103 struct msm_drm_private *priv;
104 struct sde_danger_safe_status status;
105 int i;
Alan Kwong1124f1f2017-11-10 18:14:39 -0500106 int rc;
Alan Kwongf0fd8512016-10-24 21:39:26 -0400107
108 if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
109 SDE_ERROR("invalid arg(s)\n");
110 return 0;
111 }
112
113 priv = kms->dev->dev_private;
114 memset(&status, 0, sizeof(struct sde_danger_safe_status));
115
Alan Kwong1124f1f2017-11-10 18:14:39 -0500116 rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
117 if (rc) {
118 SDE_ERROR("failed to enable power resource %d\n", rc);
119 SDE_EVT32(rc, SDE_EVTLOG_ERROR);
120 return rc;
121 }
122
Alan Kwongf0fd8512016-10-24 21:39:26 -0400123 if (danger_status) {
124 seq_puts(s, "\nDanger signal status:\n");
125 if (kms->hw_mdp->ops.get_danger_status)
126 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
127 &status);
128 } else {
129 seq_puts(s, "\nSafe signal status:\n");
130 if (kms->hw_mdp->ops.get_danger_status)
131 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
132 &status);
133 }
134 sde_power_resource_enable(&priv->phandle, kms->core_client, false);
135
136 seq_printf(s, "MDP : 0x%x\n", status.mdp);
137
138 for (i = SSPP_VIG0; i < SSPP_MAX; i++)
139 seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
140 status.sspp[i]);
141 seq_puts(s, "\n");
142
143 for (i = WB_0; i < WB_MAX; i++)
144 seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
145 status.wb[i]);
146 seq_puts(s, "\n");
147
148 return 0;
149}
150
/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - boilerplate generator for seq_file debugfs
 * nodes: expands to a single_open() wrapper around __prefix ## _show and a
 * matching const file_operations named __prefix ## _fops.
 */
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

/* debugfs "danger_status" show: dump danger signal words */
static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, true);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);

/* debugfs "safe_status" show: dump safe signal words */
static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, false);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
175
176static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
177{
178 debugfs_remove_recursive(sde_kms->debugfs_danger);
179 sde_kms->debugfs_danger = NULL;
180}
181
182static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
183 struct dentry *parent)
184{
185 sde_kms->debugfs_danger = debugfs_create_dir("danger",
186 parent);
187 if (!sde_kms->debugfs_danger) {
188 SDE_ERROR("failed to create danger debugfs\n");
189 return -EINVAL;
190 }
191
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400192 debugfs_create_file("danger_status", 0600, sde_kms->debugfs_danger,
Alan Kwongf0fd8512016-10-24 21:39:26 -0400193 sde_kms, &sde_debugfs_danger_stats_fops);
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400194 debugfs_create_file("safe_status", 0600, sde_kms->debugfs_danger,
Alan Kwongf0fd8512016-10-24 21:39:26 -0400195 sde_kms, &sde_debugfs_safe_stats_fops);
196
197 return 0;
198}
199
/**
 * _sde_debugfs_show_regset32 - seq_file show callback dumping a register range
 * @s: seq_file whose ->private holds the struct sde_debugfs_regset32
 * @data: unused seq_file iterator payload
 *
 * Prints regset->blk_len bytes of mmio registers starting at regset->offset,
 * one 16-byte row per line, with the row address in brackets. Clocks are
 * enabled via the sde power handle around the readl_relaxed() accesses.
 * Always returns 0 so a misconfigured node reads as empty rather than erroring.
 */
static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct sde_debugfs_regset32 *regset;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	sde_kms = regset->sde_kms;
	if (!sde_kms || !sde_kms->mmio)
		return 0;

	dev = sde_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = sde_kms->mmio + regset->offset;

	/* insert padding spaces, if needed, so columns line up when the
	 * starting offset is not 16-byte aligned
	 */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, " ");
	}

	if (sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, true)) {
		seq_puts(s, "failed to enable sde clocks\n");
		return 0;
	}

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	/* drop the clock vote taken above */
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	return 0;
}
253
/* single_open() adapter feeding _sde_debugfs_show_regset32 the regset */
static int sde_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
}

/* file_operations shared by every regset32 debugfs node */
static const struct file_operations sde_fops_regset32 = {
	.open = sde_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
266
267void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
Clarence Ipaac9f332016-08-31 15:46:35 -0400268 uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
Clarence Ip4ce59322016-06-26 22:27:51 -0400269{
270 if (regset) {
271 regset->offset = offset;
272 regset->blk_len = length;
Clarence Ipaac9f332016-08-31 15:46:35 -0400273 regset->sde_kms = sde_kms;
Clarence Ip4ce59322016-06-26 22:27:51 -0400274 }
275}
276
277void *sde_debugfs_create_regset32(const char *name, umode_t mode,
278 void *parent, struct sde_debugfs_regset32 *regset)
279{
Clarence Ipaac9f332016-08-31 15:46:35 -0400280 if (!name || !regset || !regset->sde_kms || !regset->blk_len)
Clarence Ip4ce59322016-06-26 22:27:51 -0400281 return NULL;
282
283 /* make sure offset is a multiple of 4 */
284 regset->offset = round_down(regset->offset, 4);
285
286 return debugfs_create_file(name, mode, parent,
287 regset, &sde_fops_regset32);
288}
289
290void *sde_debugfs_get_root(struct sde_kms *sde_kms)
291{
Dhaval Patel6c666622017-03-21 23:02:59 -0700292 struct msm_drm_private *priv;
293
294 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
295 return NULL;
296
297 priv = sde_kms->dev->dev_private;
298 return priv->debug_root;
Clarence Ip4ce59322016-06-26 22:27:51 -0400299}
300
/*
 * Initialize sde debugfs: hw log mask node plus danger/vbif/core-irq/perf
 * sub-trees under the drm debug root. Returns 0 on success, -EINVAL when the
 * kms handle, the log-mask pointer, or the debugfs root is unavailable, or
 * the perf init error code.
 */
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	/* NOTE(review): a NULL root is treated as fatal here, despite the
	 * historical comment claiming NULL was allowed
	 */
	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* hw log mask toggle lives directly under the debug root */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	/* best-effort sub-module debugfs init; failures are ignored */
	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	return 0;
}
331
/* Tear down the vbif, danger and core-irq debugfs sub-trees. */
static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_danger_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}
#else
/* CONFIG_DEBUG_FS disabled: debugfs init/destroy become no-ops */
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
}
#endif
Clarence Ip4ce59322016-06-26 22:27:51 -0400351
Alan Kwongf5dd86c2016-08-09 18:08:17 -0400352static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
353{
354 return sde_crtc_vblank(crtc, true);
355}
356
357static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
358{
359 sde_crtc_vblank(crtc, false);
360}
361
/**
 * sde_kms_wait_for_frame_transfer_complete - block until the current frame on
 *	@crtc has been fully transferred to the panel
 * @kms: pointer to kms structure (validated only)
 * @crtc: crtc whose attached encoders are waited on
 *
 * No-op (with a debug log) when the crtc is not enabled or not active.
 * -EWOULDBLOCK from the encoder wait is treated as "nothing pending".
 */
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
					crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}
404
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700405static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
406 struct drm_atomic_state *state)
407{
408 struct drm_crtc *crtc;
409 struct drm_crtc_state *old_crtc_state;
410
411 struct drm_plane *plane;
412 struct drm_plane_state *plane_state;
413 struct sde_kms *sde_kms = to_sde_kms(kms);
414 struct drm_device *dev = sde_kms->dev;
415 int i, ops = 0, ret = 0;
416 bool old_valid_fb = false;
417
418 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
419 if (!crtc->state || !crtc->state->active)
420 continue;
421 /*
422 * It is safe to assume only one active crtc,
423 * and compatible translation modes on the
424 * planes staged on this crtc.
425 * otherwise validation would have failed.
426 * For this CRTC,
427 */
428
429 /*
430 * 1. Check if old state on the CRTC has planes
431 * staged with valid fbs
432 */
433 for_each_plane_in_state(state, plane, plane_state, i) {
434 if (!plane_state->crtc)
435 continue;
436 if (plane_state->fb) {
437 old_valid_fb = true;
438 break;
439 }
440 }
441
442 /*
443 * 2.Get the operations needed to be performed before
444 * secure transition can be initiated.
445 */
446 ops = sde_crtc_get_secure_transition_ops(crtc,
Veera Sundaram Sankaranfd792402017-10-13 12:50:41 -0700447 old_crtc_state, old_valid_fb);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700448 if (ops < 0) {
449 SDE_ERROR("invalid secure operations %x\n", ops);
450 return ops;
451 }
452
453 if (!ops)
454 goto no_ops;
455
456 SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
Veera Sundaram Sankaranfd792402017-10-13 12:50:41 -0700457 crtc->base.id, ops, crtc->state);
458 SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700459
460 /* 3. Perform operations needed for secure transition */
461 if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
462 SDE_DEBUG("wait_for_transfer_done\n");
463 sde_kms_wait_for_frame_transfer_complete(kms, crtc);
464 }
465 if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
466 SDE_DEBUG("cleanup planes\n");
467 drm_atomic_helper_cleanup_planes(dev, state);
468 }
469 if (ops & SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE) {
470 SDE_DEBUG("secure ctrl\n");
471 sde_crtc_secure_ctrl(crtc, false);
472 }
473 if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
474 SDE_DEBUG("prepare planes %d",
475 crtc->state->plane_mask);
476 drm_atomic_crtc_for_each_plane(plane,
477 crtc) {
478 const struct drm_plane_helper_funcs *funcs;
479
480 plane_state = plane->state;
481 funcs = plane->helper_private;
482
483 SDE_DEBUG("psde:%d FB[%u]\n",
484 plane->base.id,
485 plane->fb->base.id);
486 if (!funcs)
487 continue;
488
489 if (funcs->prepare_fb(plane, plane_state)) {
490 ret = funcs->prepare_fb(plane,
491 plane_state);
492 if (ret)
493 return ret;
494 }
495 }
496 }
Veera Sundaram Sankaranfd792402017-10-13 12:50:41 -0700497 SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700498 SDE_DEBUG("secure operations completed\n");
499 }
500
501no_ops:
502 return 0;
503}
504
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -0700505static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
506 unsigned int size)
507{
508 unsigned long pfn_start, pfn_end, pfn_idx;
509 int ret = 0;
510
511 if (!mem_addr || !size)
512 SDE_ERROR("invalid params\n");
513
514 pfn_start = mem_addr >> PAGE_SHIFT;
515 pfn_end = (mem_addr + size) >> PAGE_SHIFT;
516
517 ret = memblock_free(mem_addr, size);
518 if (ret) {
519 SDE_ERROR("continuous splash memory free failed:%d\n", ret);
520 return ret;
521 }
522 for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
523 free_reserved_page(pfn_to_page(pfn_idx));
524
525 return ret;
526
527}
528
/**
 * sde_kms_prepare_commit - msm_kms hook run before an atomic commit is applied
 * @kms: pointer to kms structure
 * @state: atomic state being committed
 *
 * Takes a power vote (released in sde_kms_complete_commit), notifies every
 * encoder attached to a committing crtc, completes the continuous-splash
 * smmu handoff once a commit arrives with no staged planes, and finally runs
 * any required secure-transition preparation.
 */
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, rc = 0;
	struct drm_plane *plane;
	bool commit_no_planes = true;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	/* power vote held for the duration of the commit */
	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
			true);
	if (rc) {
		SDE_ERROR("failed to enable power resource %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		return;
	}

	/* notify encoders attached to each committing crtc */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				head) {
			if (encoder->crtc != crtc)
				continue;

			sde_encoder_prepare_commit(encoder);
		}
	}

	/* splash handoff completes only on a commit with no staged planes */
	if (sde_kms->splash_data.smmu_handoff_pending) {
		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
			if (plane->state != NULL &&
					plane->state->crtc != NULL)
				commit_no_planes = false;
	}

	if (sde_kms->splash_data.smmu_handoff_pending && commit_no_planes) {

		rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
				sde_kms->mmio);
		if (rc)
			SDE_ERROR("pipe staging failed: %d\n", rc);

		/* give the bootloader splash buffer back to the kernel */
		rc = _sde_kms_release_splash_buffer(
				sde_kms->splash_data.splash_base,
				sde_kms->splash_data.splash_size);
		if (rc)
			SDE_ERROR("release of splash memory failed %d\n", rc);

		sde_kms->splash_data.smmu_handoff_pending = false;
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing preparation for secure
	 * transitions; prepare below if any transitions are required.
	 */
	sde_kms_prepare_secure_transition(kms, state);
}
599
600static void sde_kms_commit(struct msm_kms *kms,
601 struct drm_atomic_state *old_state)
602{
Alan Kwong1124f1f2017-11-10 18:14:39 -0500603 struct sde_kms *sde_kms;
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700604 struct drm_crtc *crtc;
605 struct drm_crtc_state *old_crtc_state;
606 int i;
607
Alan Kwong1124f1f2017-11-10 18:14:39 -0500608 if (!kms || !old_state)
609 return;
610 sde_kms = to_sde_kms(kms);
611
612 if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
613 SDE_ERROR("power resource is not enabled\n");
614 return;
615 }
616
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700617 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
618 if (crtc->state->active) {
619 SDE_EVT32(DRMID(crtc));
Clarence Ip569d5af2017-10-14 21:09:01 -0400620 sde_crtc_commit_kickoff(crtc, old_crtc_state);
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700621 }
622 }
623}
624
/**
 * sde_kms_complete_commit - msm_kms hook run after an atomic commit finishes
 * @kms: pointer to kms structure
 * @old_state: the state that was just swapped out
 *
 * Completes per-crtc commit bookkeeping, runs each connector's post_kickoff
 * callback, and releases the power vote taken in sde_kms_prepare_commit.
 * On the first commit after continuous splash, an additional power release
 * drops the extra vote that kept the bootloader display alive.
 */
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	int i, rc = 0;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
		sde_crtc_complete_commit(crtc, old_crtc_state);

	/* post-kickoff notifications are best-effort; failures only logged */
	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		struct sde_connector *c_conn;

		c_conn = to_sde_connector(connector);
		if (!c_conn->ops.post_kickoff)
			continue;
		rc = c_conn->ops.post_kickoff(connector);
		if (rc) {
			pr_err("Connector Post kickoff failed rc=%d\n",
					rc);
		}
	}

	/* release the vote taken in sde_kms_prepare_commit */
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);

	/* second release intentionally drops the continuous-splash vote */
	if (sde_kms->cont_splash_en) {
		SDE_DEBUG("Disabling cont_splash feature\n");
		sde_kms->cont_splash_en = false;
		sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, false);
		SDE_DEBUG("removing Vote for MDP Resources\n");
	}
}
677
/**
 * sde_kms_wait_for_commit_done - block until the committed frame has flushed
 * @kms: pointer to kms structure (validated only)
 * @crtc: crtc whose attached encoders are waited on
 *
 * No-op (with a debug log) when the crtc is not enabled or not active.
 * -EWOULDBLOCK from the encoder wait is treated as "nothing pending".
 */
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}
Lloyd Atkinson5d722782016-05-30 14:09:41 -0400718
/**
 * sde_kms_prepare_fence - msm_kms hook to arm output fences before commit
 * @kms: pointer to kms structure (validated only)
 * @old_state: atomic state being committed (contains updated crtc pointers)
 *
 * Acquires the mode_config connection ww-mutex under the commit's acquire
 * context, retrying on -EDEADLK per the ww-mutex backoff protocol, then
 * prepares per-crtc commit state (fences) for every active crtc.
 */
static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i, rc;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

retry:
	/* attempt to acquire ww mutex for connection */
	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
			old_state->acquire_ctx);

	if (rc == -EDEADLK) {
		/* ww-mutex deadlock backoff, then retry the lock */
		drm_modeset_backoff(old_state->acquire_ctx);
		goto retry;
	}

	/* old_state actually contains updated crtc pointers */
	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active)
			sde_crtc_prepare_commit(crtc, old_crtc_state);
	}
}
747
/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 *
 * Allocates and fills the dsi, wb and dp display arrays. On allocation
 * failure the error labels cascade so every earlier array is freed
 * (kfree(NULL) is a safe no-op for the failing one).
 *
 * Returns: Zero on success, -EINVAL on bad argument, -ENOMEM on allocation
 * failure
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		/* count may shrink to the number of active displays */
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);
	}
	return 0;

exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;
	return rc;
}
826
827/**
828 * _sde_kms_release_displays - release cache of underlying display handles
829 * @sde_kms: Pointer to sde kms structure
830 */
831static void _sde_kms_release_displays(struct sde_kms *sde_kms)
832{
833 if (!sde_kms) {
834 SDE_ERROR("invalid sde kms\n");
835 return;
836 }
837
838 kfree(sde_kms->wb_displays);
839 sde_kms->wb_displays = NULL;
840 sde_kms->wb_display_count = 0;
841
842 kfree(sde_kms->dsi_displays);
843 sde_kms->dsi_displays = NULL;
844 sde_kms->dsi_display_count = 0;
845}
846
847/**
848 * _sde_kms_setup_displays - create encoders, bridges and connectors
849 * for underlying displays
850 * @dev: Pointer to drm device structure
851 * @priv: Pointer to private drm device data
852 * @sde_kms: Pointer to sde kms structure
853 * Returns: Zero on success
854 */
855static int _sde_kms_setup_displays(struct drm_device *dev,
856 struct msm_drm_private *priv,
857 struct sde_kms *sde_kms)
858{
859 static const struct sde_connector_ops dsi_ops = {
Alan Kwong769fba92017-11-13 16:50:36 -0500860 .set_info_blob = dsi_conn_set_info_blob,
Clarence Ip3649f8b2016-10-31 09:59:44 -0400861 .detect = dsi_conn_detect,
862 .get_modes = dsi_connector_get_modes,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -0700863 .put_modes = dsi_connector_put_modes,
Clarence Ip3649f8b2016-10-31 09:59:44 -0400864 .mode_valid = dsi_conn_mode_valid,
865 .get_info = dsi_display_get_info,
Lloyd Atkinson8c49c582016-11-18 14:23:54 -0500866 .set_backlight = dsi_display_set_backlight,
Lloyd Atkinson05d75512017-01-17 14:45:51 -0500867 .soft_reset = dsi_display_soft_reset,
Veera Sundaram Sankaranbb2bf9a2017-03-29 18:56:47 -0700868 .pre_kickoff = dsi_conn_pre_kickoff,
Jeykumar Sankaran2b098072017-03-16 17:25:59 -0700869 .clk_ctrl = dsi_display_clk_ctrl,
Clarence Ipd57b0622017-07-10 11:28:57 -0400870 .set_power = dsi_display_set_power,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -0700871 .get_mode_info = dsi_conn_get_mode_info,
872 .get_dst_format = dsi_display_get_dst_format,
Sandeep Panda98d6ab22017-09-05 08:03:16 +0530873 .post_kickoff = dsi_conn_post_kickoff,
874 .check_status = dsi_display_check_status,
Sandeep Panda11b20d82017-06-19 12:57:27 +0530875 .enable_event = dsi_conn_enable_event
Clarence Ip3649f8b2016-10-31 09:59:44 -0400876 };
877 static const struct sde_connector_ops wb_ops = {
878 .post_init = sde_wb_connector_post_init,
Alan Kwong769fba92017-11-13 16:50:36 -0500879 .set_info_blob = sde_wb_connector_set_info_blob,
Clarence Ip3649f8b2016-10-31 09:59:44 -0400880 .detect = sde_wb_connector_detect,
881 .get_modes = sde_wb_connector_get_modes,
882 .set_property = sde_wb_connector_set_property,
883 .get_info = sde_wb_get_info,
Jeykumar Sankaran2b098072017-03-16 17:25:59 -0700884 .soft_reset = NULL,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -0700885 .get_mode_info = sde_wb_get_mode_info,
Sandeep Panda98d6ab22017-09-05 08:03:16 +0530886 .get_dst_format = NULL,
887 .check_status = NULL,
Clarence Ip3649f8b2016-10-31 09:59:44 -0400888 };
Padmanabhan Komanduru63758612017-05-23 01:47:18 -0700889 static const struct sde_connector_ops dp_ops = {
890 .post_init = dp_connector_post_init,
891 .detect = dp_connector_detect,
892 .get_modes = dp_connector_get_modes,
893 .mode_valid = dp_connector_mode_valid,
894 .get_info = dp_connector_get_info,
Jeykumar Sankaran446a5f12017-05-09 20:30:39 -0700895 .get_mode_info = dp_connector_get_mode_info,
Ajay Singh Parmar315e5852017-11-23 21:47:32 -0800896 .post_open = dp_connector_post_open,
Sandeep Panda98d6ab22017-09-05 08:03:16 +0530897 .check_status = NULL,
Ajay Singh Parmar9c842d42017-09-21 13:02:05 -0700898 .pre_kickoff = dp_connector_pre_kickoff,
Padmanabhan Komanduru63758612017-05-23 01:47:18 -0700899 };
Clarence Ip3649f8b2016-10-31 09:59:44 -0400900 struct msm_display_info info;
901 struct drm_encoder *encoder;
902 void *display, *connector;
903 int i, max_encoders;
904 int rc = 0;
905
906 if (!dev || !priv || !sde_kms) {
907 SDE_ERROR("invalid argument(s)\n");
908 return -EINVAL;
909 }
910
Padmanabhan Komanduru63758612017-05-23 01:47:18 -0700911 max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
912 sde_kms->dp_display_count;
Clarence Ip3649f8b2016-10-31 09:59:44 -0400913 if (max_encoders > ARRAY_SIZE(priv->encoders)) {
914 max_encoders = ARRAY_SIZE(priv->encoders);
915 SDE_ERROR("capping number of displays to %d", max_encoders);
916 }
917
918 /* dsi */
919 for (i = 0; i < sde_kms->dsi_display_count &&
920 priv->num_encoders < max_encoders; ++i) {
921 display = sde_kms->dsi_displays[i];
922 encoder = NULL;
923
924 memset(&info, 0x0, sizeof(info));
925 rc = dsi_display_get_info(&info, display);
926 if (rc) {
927 SDE_ERROR("dsi get_info %d failed\n", i);
928 continue;
929 }
930
931 encoder = sde_encoder_init(dev, &info);
932 if (IS_ERR_OR_NULL(encoder)) {
933 SDE_ERROR("encoder init failed for dsi %d\n", i);
934 continue;
935 }
936
937 rc = dsi_display_drm_bridge_init(display, encoder);
938 if (rc) {
939 SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
940 sde_encoder_destroy(encoder);
941 continue;
942 }
943
944 connector = sde_connector_init(dev,
945 encoder,
946 0,
947 display,
948 &dsi_ops,
949 DRM_CONNECTOR_POLL_HPD,
950 DRM_MODE_CONNECTOR_DSI);
951 if (connector) {
952 priv->encoders[priv->num_encoders++] = encoder;
953 } else {
954 SDE_ERROR("dsi %d connector init failed\n", i);
955 dsi_display_drm_bridge_deinit(display);
956 sde_encoder_destroy(encoder);
957 }
958 }
959
960 /* wb */
961 for (i = 0; i < sde_kms->wb_display_count &&
962 priv->num_encoders < max_encoders; ++i) {
963 display = sde_kms->wb_displays[i];
964 encoder = NULL;
965
966 memset(&info, 0x0, sizeof(info));
967 rc = sde_wb_get_info(&info, display);
968 if (rc) {
969 SDE_ERROR("wb get_info %d failed\n", i);
970 continue;
971 }
972
973 encoder = sde_encoder_init(dev, &info);
974 if (IS_ERR_OR_NULL(encoder)) {
975 SDE_ERROR("encoder init failed for wb %d\n", i);
976 continue;
977 }
978
979 rc = sde_wb_drm_init(display, encoder);
980 if (rc) {
981 SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
982 sde_encoder_destroy(encoder);
983 continue;
984 }
985
986 connector = sde_connector_init(dev,
987 encoder,
988 0,
989 display,
990 &wb_ops,
991 DRM_CONNECTOR_POLL_HPD,
992 DRM_MODE_CONNECTOR_VIRTUAL);
993 if (connector) {
994 priv->encoders[priv->num_encoders++] = encoder;
995 } else {
996 SDE_ERROR("wb %d connector init failed\n", i);
997 sde_wb_drm_deinit(display);
998 sde_encoder_destroy(encoder);
999 }
1000 }
Padmanabhan Komanduru63758612017-05-23 01:47:18 -07001001 /* dp */
1002 for (i = 0; i < sde_kms->dp_display_count &&
1003 priv->num_encoders < max_encoders; ++i) {
1004 display = sde_kms->dp_displays[i];
1005 encoder = NULL;
1006
1007 memset(&info, 0x0, sizeof(info));
1008 rc = dp_connector_get_info(&info, display);
1009 if (rc) {
1010 SDE_ERROR("dp get_info %d failed\n", i);
1011 continue;
1012 }
1013
1014 encoder = sde_encoder_init(dev, &info);
1015 if (IS_ERR_OR_NULL(encoder)) {
1016 SDE_ERROR("dp encoder init failed %d\n", i);
1017 continue;
1018 }
1019
1020 rc = dp_drm_bridge_init(display, encoder);
1021 if (rc) {
1022 SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
1023 sde_encoder_destroy(encoder);
1024 continue;
1025 }
1026
1027 connector = sde_connector_init(dev,
1028 encoder,
1029 NULL,
1030 display,
1031 &dp_ops,
1032 DRM_CONNECTOR_POLL_HPD,
1033 DRM_MODE_CONNECTOR_DisplayPort);
1034 if (connector) {
1035 priv->encoders[priv->num_encoders++] = encoder;
1036 } else {
1037 SDE_ERROR("dp %d connector init failed\n", i);
1038 dp_drm_bridge_deinit(display);
1039 sde_encoder_destroy(encoder);
1040 }
1041 }
Clarence Ip3649f8b2016-10-31 09:59:44 -04001042
1043 return 0;
1044}
1045
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001046static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
1047{
1048 struct msm_drm_private *priv;
1049 int i;
1050
1051 if (!sde_kms) {
1052 SDE_ERROR("invalid sde_kms\n");
1053 return;
1054 } else if (!sde_kms->dev) {
1055 SDE_ERROR("invalid dev\n");
1056 return;
1057 } else if (!sde_kms->dev->dev_private) {
1058 SDE_ERROR("invalid dev_private\n");
1059 return;
1060 }
1061 priv = sde_kms->dev->dev_private;
1062
1063 for (i = 0; i < priv->num_crtcs; i++)
1064 priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001065 priv->num_crtcs = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001066
1067 for (i = 0; i < priv->num_planes; i++)
1068 priv->planes[i]->funcs->destroy(priv->planes[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001069 priv->num_planes = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001070
1071 for (i = 0; i < priv->num_connectors; i++)
1072 priv->connectors[i]->funcs->destroy(priv->connectors[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001073 priv->num_connectors = 0;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001074
1075 for (i = 0; i < priv->num_encoders; i++)
1076 priv->encoders[i]->funcs->destroy(priv->encoders[i]);
Clarence Ip17162b52016-11-24 17:06:29 -05001077 priv->num_encoders = 0;
1078
1079 _sde_kms_release_displays(sde_kms);
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001080}
1081
/*
 * _sde_kms_drm_obj_init - create the DRM mode objects for this kms:
 * per-display encoders/connectors, one plane per SSPP (plus smart-DMA
 * virtual planes), and one CRTC per encoder.  Returns 0 on success or a
 * negative errno; on failure all partially-created objects are destroyed.
 */
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;

	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;

	/* smart-DMA bookkeeping, indexed by (priority - 1) below */
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	/*
	 * NOTE(review): on add failure this jumps to fail_irq, which calls
	 * sde_core_irq_domain_fini() even though the domain was never added —
	 * presumably fini tolerates that; confirm.
	 */
	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	/* cannot drive more CRTCs than there are mixers or encoders */
	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		/* cursor pipes and overflow pipes become overlay planes */
		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		/* remember multirect-capable pipes for virtual plane setup */
		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
			sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
			(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	/* each CRTC needs a primary plane; don't create more than we have */
	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
1191
Alan Kwong4dd64c82017-02-04 18:41:51 -08001192/**
Dhaval Patel2a3c37a2017-10-25 12:30:36 -07001193 * sde_kms_timeline_status - provides current timeline status
1194 * This API should be called without mode config lock.
1195 * @dev: Pointer to drm device
1196 */
1197void sde_kms_timeline_status(struct drm_device *dev)
1198{
1199 struct drm_crtc *crtc;
1200 struct drm_connector *conn;
1201
1202 if (!dev) {
1203 SDE_ERROR("invalid drm device node\n");
1204 return;
1205 }
1206
1207 drm_for_each_crtc(crtc, dev)
1208 sde_crtc_timeline_status(crtc);
1209
1210 mutex_lock(&dev->mode_config.mutex);
1211 drm_for_each_connector(conn, dev)
1212 sde_conn_timeline_status(conn);
1213 mutex_unlock(&dev->mode_config.mutex);
1214}
1215
1216/**
Alan Kwong4dd64c82017-02-04 18:41:51 -08001217 * struct sde_kms_fbo_fb - framebuffer creation list
1218 * @list: list of framebuffer attached to framebuffer object
1219 * @fb: Pointer to framebuffer attached to framebuffer object
1220 */
1221struct sde_kms_fbo_fb {
1222 struct list_head list;
1223 struct drm_framebuffer *fb;
1224};
1225
1226struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
1227 struct sde_kms_fbo *fbo)
1228{
1229 struct drm_framebuffer *fb = NULL;
1230 struct sde_kms_fbo_fb *fbo_fb;
1231 struct drm_mode_fb_cmd2 mode_cmd = {0};
1232 u32 base_offset = 0;
1233 int i, ret;
1234
1235 if (!dev) {
1236 SDE_ERROR("invalid drm device node\n");
1237 return NULL;
1238 }
1239
1240 fbo_fb = kzalloc(sizeof(struct sde_kms_fbo_fb), GFP_KERNEL);
1241 if (!fbo_fb)
1242 return NULL;
1243
1244 mode_cmd.pixel_format = fbo->pixel_format;
1245 mode_cmd.width = fbo->width;
1246 mode_cmd.height = fbo->height;
1247 mode_cmd.flags = fbo->flags;
1248
1249 for (i = 0; i < fbo->nplane; i++) {
1250 mode_cmd.offsets[i] = base_offset;
1251 mode_cmd.pitches[i] = fbo->layout.plane_pitch[i];
1252 mode_cmd.modifier[i] = fbo->modifier[i];
1253 base_offset += fbo->layout.plane_size[i];
1254 SDE_DEBUG("offset[%d]:%x\n", i, mode_cmd.offsets[i]);
1255 }
1256
1257 fb = msm_framebuffer_init(dev, &mode_cmd, fbo->bo);
1258 if (IS_ERR(fb)) {
1259 ret = PTR_ERR(fb);
1260 fb = NULL;
1261 SDE_ERROR("failed to allocate fb %d\n", ret);
1262 goto fail;
1263 }
1264
1265 /* need to take one reference for gem object */
1266 for (i = 0; i < fbo->nplane; i++)
1267 drm_gem_object_reference(fbo->bo[i]);
1268
1269 SDE_DEBUG("register private fb:%d\n", fb->base.id);
1270
1271 INIT_LIST_HEAD(&fbo_fb->list);
1272 fbo_fb->fb = fb;
1273 drm_framebuffer_reference(fbo_fb->fb);
1274 list_add_tail(&fbo_fb->list, &fbo->fb_list);
1275
1276 return fb;
1277
1278fail:
1279 kfree(fbo_fb);
1280 return NULL;
1281}
1282
/*
 * sde_kms_fbo_destroy - release everything owned by a framebuffer object:
 * all framebuffers created from it, the per-plane gem references, the
 * shared dma-buf and, when ION-backed, the ION handle.  Called from
 * sde_kms_fbo_unreference() when the refcount hits zero and directly on
 * the sde_kms_fbo_alloc() error path.
 */
static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct sde_kms_fbo_fb *curr, *next;
	int i;

	if (!fbo) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	dev = fbo->dev;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	priv = dev->dev_private;

	if (!priv->kms) {
		SDE_ERROR("invalid kms handle\n");
		return;
	}
	sde_kms = to_sde_kms(priv->kms);

	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", fbo->width, fbo->height,
			fbo->pixel_format >> 0, fbo->pixel_format >> 8,
			fbo->pixel_format >> 16, fbo->pixel_format >> 24,
			fbo->modifier[0], fbo->flags);

	/* drop the list reference taken in sde_kms_fbo_create_fb() */
	list_for_each_entry_safe(curr, next, &fbo->fb_list, list) {
		SDE_DEBUG("unregister private fb:%d\n", curr->fb->base.id);
		drm_framebuffer_unregister_private(curr->fb);
		drm_framebuffer_unreference(curr->fb);
		list_del(&curr->list);
		kfree(curr);
	}

	/* gem unreference requires struct_mutex on this kernel version */
	for (i = 0; i < fbo->layout.num_planes; i++) {
		if (fbo->bo[i]) {
			mutex_lock(&dev->struct_mutex);
			drm_gem_object_unreference(fbo->bo[i]);
			mutex_unlock(&dev->struct_mutex);
			fbo->bo[i] = NULL;
		}
	}

	/* release the dma-buf exported from the ION handle, then the handle */
	if (fbo->dma_buf) {
		dma_buf_put(fbo->dma_buf);
		fbo->dma_buf = NULL;
	}

	if (sde_kms->iclient && fbo->ihandle) {
		ion_free(sde_kms->iclient, fbo->ihandle);
		fbo->ihandle = NULL;
	}
}
1341
Clarence Ipd02440b2017-05-21 18:10:01 -04001342static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
1343 uint32_t flags)
1344{
1345 if (msm_obj)
1346 msm_obj->flags |= flags;
1347}
1348
Alan Kwong4dd64c82017-02-04 18:41:51 -08001349struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
1350 u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
1351{
1352 struct msm_drm_private *priv;
1353 struct sde_kms *sde_kms;
1354 struct sde_kms_fbo *fbo;
1355 int i, ret;
1356
1357 if (!dev || !dev->dev_private) {
1358 SDE_ERROR("invalid drm device node\n");
1359 return NULL;
1360 }
1361 priv = dev->dev_private;
1362
1363 if (!priv->kms) {
1364 SDE_ERROR("invalid kms handle\n");
1365 return NULL;
1366 }
1367 sde_kms = to_sde_kms(priv->kms);
1368
1369 SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", width, height,
1370 pixel_format >> 0, pixel_format >> 8,
1371 pixel_format >> 16, pixel_format >> 24,
1372 modifier[0], flags);
1373
1374 fbo = kzalloc(sizeof(struct sde_kms_fbo), GFP_KERNEL);
1375 if (!fbo)
1376 return NULL;
1377
1378 atomic_set(&fbo->refcount, 0);
1379 INIT_LIST_HEAD(&fbo->fb_list);
1380 fbo->dev = dev;
1381 fbo->width = width;
1382 fbo->height = height;
1383 fbo->pixel_format = pixel_format;
1384 fbo->flags = flags;
1385 for (i = 0; i < ARRAY_SIZE(fbo->modifier); i++)
1386 fbo->modifier[i] = modifier[i];
1387 fbo->nplane = drm_format_num_planes(fbo->pixel_format);
1388 fbo->fmt = sde_get_sde_format_ext(fbo->pixel_format, fbo->modifier,
1389 fbo->nplane);
1390 if (!fbo->fmt) {
1391 ret = -EINVAL;
1392 SDE_ERROR("failed to find pixel format\n");
1393 goto done;
1394 }
1395
1396 ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
Narendra Muppalla58a64e22017-07-24 10:54:47 -07001397 &fbo->layout, fbo->layout.plane_pitch);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001398 if (ret) {
1399 SDE_ERROR("failed to get plane sizes\n");
1400 goto done;
1401 }
1402
1403 /* allocate backing buffer object */
Alan Kwong54125bb2017-02-26 16:01:36 -08001404 if (sde_kms->iclient) {
1405 u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
Alan Kwong3f2a5152017-08-25 16:19:43 -04001406 ION_HEAP(ION_SECURE_HEAP_ID) :
Alan Kwong54125bb2017-02-26 16:01:36 -08001407 ION_HEAP(ION_SYSTEM_HEAP_ID);
Alan Kwong3f2a5152017-08-25 16:19:43 -04001408 u32 iflags = fbo->flags & DRM_MODE_FB_SECURE ?
1409 (ION_FLAG_SECURE | ION_FLAG_CP_PIXEL) : 0;
Alan Kwong54125bb2017-02-26 16:01:36 -08001410
1411 fbo->ihandle = ion_alloc(sde_kms->iclient,
Alan Kwong3f2a5152017-08-25 16:19:43 -04001412 fbo->layout.total_size, SZ_4K, heap_id, iflags);
Alan Kwong54125bb2017-02-26 16:01:36 -08001413 if (IS_ERR_OR_NULL(fbo->ihandle)) {
1414 SDE_ERROR("failed to alloc ion memory\n");
1415 ret = PTR_ERR(fbo->ihandle);
1416 fbo->ihandle = NULL;
1417 goto done;
1418 }
1419
1420 fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
1421 fbo->ihandle);
1422 if (IS_ERR(fbo->dma_buf)) {
1423 SDE_ERROR("failed to share ion memory\n");
1424 ret = -ENOMEM;
1425 fbo->dma_buf = NULL;
1426 goto done;
1427 }
1428
1429 fbo->bo[0] = dev->driver->gem_prime_import(dev,
1430 fbo->dma_buf);
1431 if (IS_ERR(fbo->bo[0])) {
1432 SDE_ERROR("failed to import ion memory\n");
1433 ret = PTR_ERR(fbo->bo[0]);
1434 fbo->bo[0] = NULL;
1435 goto done;
1436 }
Clarence Ipd02440b2017-05-21 18:10:01 -04001437
1438 /* insert extra bo flags */
1439 sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001440 } else {
1441 mutex_lock(&dev->struct_mutex);
1442 fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
Clarence Ipd02440b2017-05-21 18:10:01 -04001443 MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001444 if (IS_ERR(fbo->bo[0])) {
1445 mutex_unlock(&dev->struct_mutex);
1446 SDE_ERROR("failed to new gem buffer\n");
1447 ret = PTR_ERR(fbo->bo[0]);
1448 fbo->bo[0] = NULL;
1449 goto done;
1450 }
Alan Kwong4dd64c82017-02-04 18:41:51 -08001451 mutex_unlock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001452 }
1453
Alan Kwong54125bb2017-02-26 16:01:36 -08001454 mutex_lock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001455 for (i = 1; i < fbo->layout.num_planes; i++) {
1456 fbo->bo[i] = fbo->bo[0];
1457 drm_gem_object_reference(fbo->bo[i]);
1458 }
1459 mutex_unlock(&dev->struct_mutex);
1460
1461done:
1462 if (ret) {
1463 sde_kms_fbo_destroy(fbo);
1464 kfree(fbo);
1465 fbo = NULL;
1466 } else {
1467 sde_kms_fbo_reference(fbo);
1468 }
1469
1470 return fbo;
1471}
1472
1473int sde_kms_fbo_reference(struct sde_kms_fbo *fbo)
1474{
1475 if (!fbo) {
1476 SDE_ERROR("invalid parameters\n");
1477 return -EINVAL;
1478 }
1479
1480 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1481 atomic_read(&fbo->refcount));
1482
1483 atomic_inc(&fbo->refcount);
1484
1485 return 0;
1486}
1487
1488void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo)
1489{
1490 if (!fbo) {
1491 SDE_ERROR("invalid parameters\n");
1492 return;
1493 }
1494
1495 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1496 atomic_read(&fbo->refcount));
1497
1498 if (!atomic_read(&fbo->refcount)) {
1499 SDE_ERROR("invalid refcount\n");
1500 return;
1501 } else if (atomic_dec_return(&fbo->refcount) == 0) {
1502 sde_kms_fbo_destroy(fbo);
1503 }
1504}
1505
Alan Kwong5a3ac752016-10-16 01:02:35 -04001506static int sde_kms_postinit(struct msm_kms *kms)
1507{
1508 struct sde_kms *sde_kms = to_sde_kms(kms);
1509 struct drm_device *dev;
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07001510 int rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04001511
1512 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
1513 SDE_ERROR("invalid sde_kms\n");
1514 return -EINVAL;
1515 }
1516
1517 dev = sde_kms->dev;
1518
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07001519 rc = _sde_debugfs_init(sde_kms);
1520 if (rc)
1521 SDE_ERROR("sde_debugfs init failed: %d\n", rc);
1522
1523 return rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04001524}
1525
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001526static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001527 struct drm_encoder *encoder)
1528{
1529 return rate;
1530}
1531
Clarence Ip17162b52016-11-24 17:06:29 -05001532static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
1533 struct platform_device *pdev)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001534{
Clarence Ip17162b52016-11-24 17:06:29 -05001535 struct drm_device *dev;
1536 struct msm_drm_private *priv;
Alan Kwong5d324e42016-07-28 22:56:18 -04001537 int i;
1538
Clarence Ip17162b52016-11-24 17:06:29 -05001539 if (!sde_kms || !pdev)
1540 return;
1541
1542 dev = sde_kms->dev;
1543 if (!dev)
1544 return;
1545
1546 priv = dev->dev_private;
1547 if (!priv)
1548 return;
1549
Alan Kwong23afc2d92017-09-15 10:59:06 -04001550 if (sde_kms->genpd_init) {
1551 sde_kms->genpd_init = false;
1552 pm_genpd_remove(&sde_kms->genpd);
1553 of_genpd_del_provider(pdev->dev.of_node);
1554 }
1555
Clarence Ip17162b52016-11-24 17:06:29 -05001556 if (sde_kms->hw_intr)
1557 sde_hw_intr_destroy(sde_kms->hw_intr);
1558 sde_kms->hw_intr = NULL;
1559
Clarence Ip7f0de632017-05-31 14:59:14 -04001560 if (sde_kms->power_event)
1561 sde_power_handle_unregister_event(
1562 &priv->phandle, sde_kms->power_event);
1563
Clarence Ip17162b52016-11-24 17:06:29 -05001564 _sde_kms_release_displays(sde_kms);
1565
1566 /* safe to call these more than once during shutdown */
1567 _sde_debugfs_destroy(sde_kms);
1568 _sde_kms_mmu_destroy(sde_kms);
1569
Alan Kwong54125bb2017-02-26 16:01:36 -08001570 if (sde_kms->iclient) {
1571 ion_client_destroy(sde_kms->iclient);
1572 sde_kms->iclient = NULL;
1573 }
1574
Lloyd Atkinson79f08802017-01-09 17:37:18 -05001575 if (sde_kms->catalog) {
1576 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
1577 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
Alan Kwong5d324e42016-07-28 22:56:18 -04001578
Lloyd Atkinson79f08802017-01-09 17:37:18 -05001579 if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
1580 sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
1581 }
Alan Kwong5d324e42016-07-28 22:56:18 -04001582 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001583
Clarence Ip17162b52016-11-24 17:06:29 -05001584 if (sde_kms->rm_init)
1585 sde_rm_destroy(&sde_kms->rm);
1586 sde_kms->rm_init = false;
1587
1588 if (sde_kms->catalog)
1589 sde_hw_catalog_deinit(sde_kms->catalog);
1590 sde_kms->catalog = NULL;
1591
1592 if (sde_kms->core_client)
1593 sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
1594 sde_kms->core_client = NULL;
1595
1596 if (sde_kms->vbif[VBIF_NRT])
1597 msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
1598 sde_kms->vbif[VBIF_NRT] = NULL;
1599
1600 if (sde_kms->vbif[VBIF_RT])
1601 msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
1602 sde_kms->vbif[VBIF_RT] = NULL;
1603
1604 if (sde_kms->mmio)
1605 msm_iounmap(pdev, sde_kms->mmio);
1606 sde_kms->mmio = NULL;
Gopikrishnaiah Anandandb90fa12017-05-09 17:56:08 -07001607
1608 sde_reg_dma_deinit();
Clarence Ip17162b52016-11-24 17:06:29 -05001609}
1610
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07001611int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
1612{
1613 int i;
1614
1615 if (!sde_kms)
1616 return -EINVAL;
1617
1618 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
1619 struct msm_mmu *mmu;
1620 struct msm_gem_address_space *aspace = sde_kms->aspace[i];
1621
1622 if (!aspace)
1623 continue;
1624
1625 mmu = sde_kms->aspace[i]->mmu;
1626
1627 if (secure_only &&
1628 !aspace->mmu->funcs->is_domain_secure(mmu))
1629 continue;
1630
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07001631 /* cleanup aspace before detaching */
1632 msm_gem_aspace_domain_attach_detach_update(aspace, true);
1633
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07001634 SDE_DEBUG("Detaching domain:%d\n", i);
1635 aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
1636 ARRAY_SIZE(iommu_ports));
1637
1638 aspace->domain_attached = false;
1639 }
1640
1641 return 0;
1642}
1643
1644int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
1645{
1646 int i;
1647
1648 if (!sde_kms)
1649 return -EINVAL;
1650
1651 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
1652 struct msm_mmu *mmu;
1653 struct msm_gem_address_space *aspace = sde_kms->aspace[i];
1654
1655 if (!aspace)
1656 continue;
1657
1658 mmu = sde_kms->aspace[i]->mmu;
1659
1660 if (secure_only &&
1661 !aspace->mmu->funcs->is_domain_secure(mmu))
1662 continue;
1663
1664 SDE_DEBUG("Attaching domain:%d\n", i);
1665 aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
1666 ARRAY_SIZE(iommu_ports));
1667
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07001668 msm_gem_aspace_domain_attach_detach_update(aspace, false);
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07001669 aspace->domain_attached = true;
1670 }
1671
1672 return 0;
1673}
1674
Clarence Ip17162b52016-11-24 17:06:29 -05001675static void sde_kms_destroy(struct msm_kms *kms)
1676{
1677 struct sde_kms *sde_kms;
1678 struct drm_device *dev;
1679
1680 if (!kms) {
1681 SDE_ERROR("invalid kms\n");
1682 return;
1683 }
1684
1685 sde_kms = to_sde_kms(kms);
1686 dev = sde_kms->dev;
1687 if (!dev) {
1688 SDE_ERROR("invalid device\n");
1689 return;
1690 }
1691
1692 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001693 kfree(sde_kms);
1694}
1695
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07001696static void _sde_kms_plane_force_remove(struct drm_plane *plane,
1697 struct drm_atomic_state *state)
1698{
1699 struct drm_plane_state *plane_state;
1700 int ret = 0;
1701
1702 if (!plane->crtc)
1703 return;
1704
1705 plane_state = drm_atomic_get_plane_state(state, plane);
1706 if (IS_ERR(plane_state)) {
1707 ret = PTR_ERR(plane_state);
1708 SDE_ERROR("error %d getting plane %d state\n",
1709 ret, plane->base.id);
1710 return;
1711 }
1712
1713 plane->old_fb = plane->fb;
1714
1715 SDE_DEBUG("disabling plane %d\n", plane->base.id);
1716
1717 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
1718 if (ret != 0)
1719 SDE_ERROR("error %d disabling plane %d\n", ret,
1720 plane->base.id);
1721}
1722
/*
 * _sde_kms_remove_fbs - detach a closing file's framebuffers from the
 * hardware.  Framebuffers still referenced elsewhere (refcount > 1) may be
 * in active use, so their planes are force-disabled via an atomic commit
 * before the fbs are dropped; singly-referenced fbs are simply released.
 * Consumes @state on success (commit or free); on -EDEADLK the caller
 * retries with the same state.  Returns 0 or a negative errno.
 */
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	int ret = 0;
	u32 plane_mask = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			/* possibly scanned out: disable its plane(s) first */
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->fb == fb) {
					plane_mask |=
						1 << drm_plane_index(plane);
					_sde_kms_plane_force_remove(
							plane, state);
				}
			}
		} else {
			/* last reference is the file's own: just drop it */
			list_del_init(&fb->filp_head);
			drm_framebuffer_unreference(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		drm_atomic_state_free(state);
		return 0;
	}

	SDE_DEBUG("committing after removing all the pipes\n");
	ret = drm_atomic_commit(state);

	if (ret) {
		/*
		 * move the fbs back to original list, so it would be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	/* commit succeeded: release the file's reference on each fb */
	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_unreference(fb);
	}

end:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	return ret;
}
1786
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04001787static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
1788{
1789 struct sde_kms *sde_kms = to_sde_kms(kms);
1790 struct drm_device *dev = sde_kms->dev;
1791 struct msm_drm_private *priv = dev->dev_private;
1792 unsigned int i;
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07001793 struct drm_atomic_state *state = NULL;
1794 int ret = 0;
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04001795
1796 for (i = 0; i < priv->num_crtcs; i++)
1797 sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
Veera Sundaram Sankarane2bf6862017-08-01 13:55:12 -07001798
1799 drm_modeset_lock_all(dev);
1800 state = drm_atomic_state_alloc(dev);
1801 if (!state) {
1802 ret = -ENOMEM;
1803 goto end;
1804 }
1805
1806 state->acquire_ctx = dev->mode_config.acquire_ctx;
1807
1808 for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
1809 ret = _sde_kms_remove_fbs(sde_kms, file, state);
1810 if (ret != -EDEADLK)
1811 break;
1812 drm_atomic_state_clear(state);
1813 drm_atomic_legacy_backoff(state);
1814 }
1815
1816end:
1817 if ((ret != 0) && state)
1818 drm_atomic_state_free(state);
1819
1820 SDE_DEBUG("sde preclose done, ret:%d\n", ret);
1821 drm_modeset_unlock_all(dev);
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04001822}
1823
Lloyd Atkinsone08229c2017-10-02 17:53:30 -04001824static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
1825 struct drm_atomic_state *state)
1826{
1827 struct drm_device *dev = sde_kms->dev;
1828 struct drm_plane *plane;
1829 struct drm_plane_state *plane_state;
1830 struct drm_crtc *crtc;
1831 struct drm_crtc_state *crtc_state;
1832 struct drm_connector *conn;
1833 struct drm_connector_state *conn_state;
1834 int ret = 0;
1835
1836 drm_for_each_plane(plane, dev) {
1837 plane_state = drm_atomic_get_plane_state(state, plane);
1838 if (IS_ERR(plane_state)) {
1839 ret = PTR_ERR(plane_state);
1840 SDE_ERROR("error %d getting plane %d state\n",
1841 ret, DRMID(plane));
1842 return ret;
1843 }
1844
1845 ret = sde_plane_helper_reset_custom_properties(plane,
1846 plane_state);
1847 if (ret) {
1848 SDE_ERROR("error %d resetting plane props %d\n",
1849 ret, DRMID(plane));
1850 return ret;
1851 }
1852 }
1853 drm_for_each_crtc(crtc, dev) {
1854 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1855 if (IS_ERR(crtc_state)) {
1856 ret = PTR_ERR(crtc_state);
1857 SDE_ERROR("error %d getting crtc %d state\n",
1858 ret, DRMID(crtc));
1859 return ret;
1860 }
1861
1862 ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
1863 if (ret) {
1864 SDE_ERROR("error %d resetting crtc props %d\n",
1865 ret, DRMID(crtc));
1866 return ret;
1867 }
1868 }
1869
1870 drm_for_each_connector(conn, dev) {
1871 conn_state = drm_atomic_get_connector_state(state, conn);
1872 if (IS_ERR(conn_state)) {
1873 ret = PTR_ERR(conn_state);
1874 SDE_ERROR("error %d getting connector %d state\n",
1875 ret, DRMID(conn));
1876 return ret;
1877 }
1878
1879 ret = sde_connector_helper_reset_custom_properties(conn,
1880 conn_state);
1881 if (ret) {
1882 SDE_ERROR("error %d resetting connector props %d\n",
1883 ret, DRMID(conn));
1884 return ret;
1885 }
1886 }
1887
1888 return ret;
1889}
1890
/*
 * sde_kms_lastclose - drm_driver lastclose hook
 * @kms: pointer to the kms (embedded in sde_kms)
 *
 * When the last drm file handle closes, commits an atomic state that
 * resets all driver-private (custom) properties on planes, crtcs and
 * connectors back to their defaults, retrying on modeset-lock deadlock
 * up to TEARDOWN_DEADLOCK_RETRY_MAX times.
 */
static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	int ret, i;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return;

	/* caller is expected to hold the modeset locks for this ctx */
	state->acquire_ctx = dev->mode_config.acquire_ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		/* add reset of custom properties to the state */
		ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
		if (ret)
			break;

		ret = drm_atomic_commit(state);
		if (ret != -EDEADLK)
			break;

		/* deadlock: drop staged state and back off before retrying */
		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
		SDE_DEBUG("deadlock backoff on attempt %d\n", i);
	}

	if (ret) {
		/**
		 * on success, atomic state object ownership transfers to
		 * framework, otherwise, free it here
		 */
		drm_atomic_state_free(state);
		SDE_ERROR("failed to run last close: %d\n", ret);
	}
}
1936
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001937static int sde_kms_check_secure_transition(struct msm_kms *kms,
1938 struct drm_atomic_state *state)
1939{
1940 struct sde_kms *sde_kms;
1941 struct drm_device *dev;
1942 struct drm_crtc *crtc;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001943 struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001944 struct drm_crtc_state *crtc_state;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001945 int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
1946 bool sec_session = false, global_sec_session = false;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001947 int i;
1948
1949 if (!kms || !state) {
1950 return -EINVAL;
1951 SDE_ERROR("invalid arguments\n");
1952 }
1953
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001954 sde_kms = to_sde_kms(kms);
1955 dev = sde_kms->dev;
1956
1957 /* iterate state object for active secure/non-secure crtc */
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001958 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1959 if (!crtc_state->active)
1960 continue;
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001961
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001962 active_crtc_cnt++;
1963 if (sde_crtc_get_secure_level(crtc, crtc_state) ==
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001964 SDE_DRM_SEC_ONLY)
1965 sec_session = true;
1966
1967 cur_crtc = crtc;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001968 }
1969
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001970 /* iterate global list for active and secure crtc */
1971 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001972 if (!crtc->state->active)
1973 continue;
1974
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001975 global_active_crtc_cnt++;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001976 if (sde_crtc_get_secure_level(crtc, crtc->state) ==
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001977 SDE_DRM_SEC_ONLY)
1978 global_sec_session = true;
1979
1980 global_crtc = crtc;
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001981 }
1982
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001983 /*
1984 * - fail secure crtc commit, if any other crtc session is already
1985 * in progress
1986 * - fail non-secure crtc commit, if any secure crtc session is already
1987 * in progress
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001988 */
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07001989 if (global_sec_session || sec_session) {
1990 if ((global_active_crtc_cnt >
1991 MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
1992 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
1993 SDE_ERROR(
1994 "Secure check failed global_active:%d active:%d\n",
1995 global_active_crtc_cnt, active_crtc_cnt);
1996 return -EPERM;
1997
1998 /*
1999 * As only one crtc is allowed during secure session, the crtc
2000 * in this commit should match with the global crtc, if it
2001 * exists
2002 */
2003 } else if (global_crtc && (global_crtc != cur_crtc)) {
2004 SDE_ERROR(
2005 "crtc%d-sec%d not allowed during crtc%d-sec%d\n",
Veera Sundaram Sankaran4db71f22017-11-16 14:33:10 -08002006 cur_crtc ? cur_crtc->base.id : -1, sec_session,
Veera Sundaram Sankaranf9fca5f2017-10-30 14:30:29 -07002007 global_crtc->base.id, global_sec_session);
2008 return -EPERM;
2009 }
2010
2011 }
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002012
2013 return 0;
2014}
2015
2016static int sde_kms_atomic_check(struct msm_kms *kms,
2017 struct drm_atomic_state *state)
2018{
2019 struct sde_kms *sde_kms;
2020 struct drm_device *dev;
2021 int ret;
2022
2023 if (!kms || !state)
2024 return -EINVAL;
2025
2026 sde_kms = to_sde_kms(kms);
2027 dev = sde_kms->dev;
2028
Clarence Ipd86f6e42017-08-08 18:31:00 -04002029 if (sde_kms_is_suspend_blocked(dev)) {
2030 SDE_DEBUG("suspended, skip atomic_check\n");
2031 return -EBUSY;
2032 }
2033
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07002034 ret = drm_atomic_helper_check(dev, state);
2035 if (ret)
2036 return ret;
2037 /*
2038 * Check if any secure transition(moving CRTC between secure and
2039 * non-secure state and vice-versa) is allowed or not. when moving
2040 * to secure state, planes with fb_mode set to dir_translated only can
2041 * be staged on the CRTC, and only one CRTC can be active during
2042 * Secure state
2043 */
2044 return sde_kms_check_secure_transition(kms, state);
2045}
2046
Jordan Croused8e96522017-02-13 10:14:16 -07002047static struct msm_gem_address_space*
2048_sde_kms_get_address_space(struct msm_kms *kms,
2049 unsigned int domain)
2050{
2051 struct sde_kms *sde_kms;
2052
2053 if (!kms) {
2054 SDE_ERROR("invalid kms\n");
2055 return NULL;
2056 }
2057
2058 sde_kms = to_sde_kms(kms);
2059 if (!sde_kms) {
2060 SDE_ERROR("invalid sde_kms\n");
2061 return NULL;
2062 }
2063
2064 if (domain >= MSM_SMMU_DOMAIN_MAX)
2065 return NULL;
2066
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07002067 return (sde_kms->aspace[domain] &&
2068 sde_kms->aspace[domain]->domain_attached) ?
2069 sde_kms->aspace[domain] : NULL;
Jordan Croused8e96522017-02-13 10:14:16 -07002070}
2071
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002072static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
2073{
2074 struct drm_device *dev = NULL;
2075 struct sde_kms *sde_kms = NULL;
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302076 struct drm_connector *connector = NULL;
2077 struct sde_connector *sde_conn = NULL;
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002078
2079 if (!kms) {
2080 SDE_ERROR("invalid kms\n");
2081 return;
2082 }
2083
2084 sde_kms = to_sde_kms(kms);
2085 dev = sde_kms->dev;
2086
2087 if (!dev) {
2088 SDE_ERROR("invalid device\n");
2089 return;
2090 }
2091
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302092 if (!dev->mode_config.poll_enabled)
2093 return;
2094
2095 mutex_lock(&dev->mode_config.mutex);
2096 drm_for_each_connector(connector, dev) {
2097 /* Only handle HPD capable connectors. */
2098 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
2099 continue;
2100
2101 sde_conn = to_sde_connector(connector);
2102
Ajay Singh Parmar315e5852017-11-23 21:47:32 -08002103 if (sde_conn->ops.post_open)
2104 sde_conn->ops.post_open(sde_conn->display);
Padmanabhan Komanduru71aec2d2017-08-30 20:07:59 +05302105 }
2106 mutex_unlock(&dev->mode_config.mutex);
2107
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07002108}
2109
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002110static int _sde_kms_gen_drm_mode(struct sde_kms *sde_kms,
2111 void *display,
2112 struct drm_display_mode *drm_mode)
2113{
2114 struct dsi_display_mode *modes = NULL;
2115 u32 count = 0;
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002116 int rc = 0;
2117
2118 rc = dsi_display_get_mode_count(display, &count);
2119 if (rc) {
2120 SDE_ERROR("failed to get num of modes, rc=%d\n", rc);
2121 return rc;
2122 }
2123
2124 SDE_DEBUG("num of modes = %d\n", count);
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002125
Lloyd Atkinson560785e2017-11-16 14:04:15 -05002126 rc = dsi_display_get_modes(display, &modes);
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002127 if (rc) {
2128 SDE_ERROR("failed to get modes, rc=%d\n", rc);
2129 count = 0;
Lloyd Atkinson560785e2017-11-16 14:04:15 -05002130 return rc;
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002131 }
2132
2133 /* TODO; currently consider modes[0] as the preferred mode */
2134 dsi_convert_to_drm_mode(&modes[0], drm_mode);
2135
2136 SDE_DEBUG("hdisplay = %d, vdisplay = %d\n",
2137 drm_mode->hdisplay, drm_mode->vdisplay);
2138 drm_mode_set_name(drm_mode);
2139 drm_mode_set_crtcinfo(drm_mode, 0);
Chandan Uddaraju3f2cf422017-06-15 15:37:39 -07002140 return rc;
2141}
2142
/*
 * sde_kms_cont_splash_config - take over the bootloader's splash display
 * @kms: pointer to the kms (embedded in sde_kms)
 *
 * When continuous splash is enabled, finds the first dsi display whose
 * info can be read, binds its encoder to crtc 0, generates and applies
 * the display's preferred mode, and updates encoder/crtc state so the
 * driver inherits the configuration the bootloader programmed.
 *
 * Returns 0 on success (or when cont_splash is disabled), negative
 * error code otherwise.
 */
static int sde_kms_cont_splash_config(struct msm_kms *kms)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->platformdev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	if (!sde_kms->cont_splash_en) {
		DRM_INFO("cont_splash feature not enabled\n");
		return rc;
	}

	/* Currently, we only support one dsi display configuration */
	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}
		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(&info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}
		SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n",
			((info.is_connected) ? "true" : "false"),
			((info.is_primary) ? "true" : "false"));
		/* first display with readable info wins */
		break;
	}

	if (!encoder) {
		SDE_ERROR("encoder not initialized\n");
		return -EINVAL;
	}

	/* bind the splash encoder to crtc 0 */
	priv = sde_kms->dev->dev_private;
	encoder->crtc = priv->crtcs[0];
	crtc = encoder->crtc;
	SDE_DEBUG("crtc id = %d\n", crtc->base.id);

	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
	drm_mode = drm_mode_create(encoder->dev);
	if (!drm_mode) {
		SDE_ERROR("drm_mode create failed\n");
		return -EINVAL;
	}
	_sde_kms_gen_drm_mode(sde_kms, display, drm_mode);
	SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
			drm_mode->name, drm_mode->base.id,
			drm_mode->type, drm_mode->flags);

	/* Update CRTC drm structure */
	crtc->state->active = true;
	rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
	if (rc) {
		SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
		return rc;
	}
	drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
	drm_mode_copy(&crtc->mode, drm_mode);

	/* Update encoder structure */
	sde_encoder_update_caps_for_cont_splash(encoder);

	sde_crtc_update_cont_splash_mixer_settings(crtc);

	return rc;
}
2235
/*
 * sde_kms_pm_suspend - PM suspend handler
 * @dev: struct device for the drm platform device
 *
 * Backs up the current atomic state into sde_kms->suspend_state for
 * restoration on resume, then commits a state that moves LP1 connectors
 * to LP2 and forces all other active CRTCs inactive. Retries the whole
 * sequence on modeset-lock deadlock (-EDEADLK).
 *
 * NOTE(review): always returns 0, even when backup or the disable
 * commit failed — callers cannot distinguish partial suspend; the
 * suspend still proceeds with whatever was accomplished.
 */
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_atomic_state *state;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_free(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		DRM_ERROR("failed to back up suspend state\n");
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (IS_ERR_OR_NULL(state)) {
		DRM_ERROR("failed to allocate crtc disable state\n");
		goto unlock;
	}

	state->acquire_ctx = &ctx;
	drm_for_each_connector(conn, ddev) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		/* skip connectors with no active crtc or not powered on */
		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON)
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_atomic_state_free(state);
				goto unlock;
			}
		}

		/* LP2 connectors stay as-is; others get their crtc touched */
		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_atomic_state_free(state);
				goto unlock;
			}

			/* LP1 crtcs stay active (they commit the LP2 prop) */
			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		drm_atomic_state_free(state);
		goto suspended;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		drm_atomic_state_free(state);
		goto unlock;
	}

suspended:
	/* block new atomic activity until pm resume */
	sde_kms->suspend_block = true;

unlock:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return 0;
}
2350
/*
 * sde_kms_pm_resume - PM resume handler
 * @dev: struct device for the drm platform device
 *
 * Resets mode config to hardware state, unblocks suspended atomic
 * activity, recommits the atomic state saved by sde_kms_pm_suspend (if
 * any), then re-enables hot-plug polling.
 *
 * Returns 0, or -EINVAL when the device/kms pointers are invalid.
 */
static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	int ret;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	drm_mode_config_reset(ddev);

	drm_modeset_lock_all(ddev);

	/* allow atomic activity again before restoring the saved state */
	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx =
			ddev->mode_config.acquire_ctx;
		ret = drm_atomic_commit(sde_kms->suspend_state);
		if (ret < 0) {
			DRM_ERROR("failed to restore state, %d\n", ret);
			/* commit failed, so ownership stays here: free it */
			drm_atomic_state_free(sde_kms->suspend_state);
		}
		sde_kms->suspend_state = NULL;
	}
	drm_modeset_unlock_all(ddev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
2391
/* msm_kms callback table wiring the SDE implementations into msm_drv */
static const struct msm_kms_funcs kms_funcs = {
	/* init / irq plumbing */
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	/* drm file lifecycle */
	.preclose = sde_kms_preclose,
	.lastclose = sde_kms_lastclose,
	/* atomic commit pipeline */
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.enable_vblank = sde_kms_enable_vblank,
	.disable_vblank = sde_kms_disable_vblank,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	/* power management */
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.postopen = _sde_kms_post_open,
};
2421
/* the caller api needs to turn on clock before calling it */
static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
{
	/* hw revision id lives at offset 0x0 of the mapped mdp register space */
	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
}
2427
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002428static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
2429{
2430 struct msm_mmu *mmu;
2431 int i;
2432
Jordan Croused8e96522017-02-13 10:14:16 -07002433 for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
2434 if (!sde_kms->aspace[i])
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002435 continue;
2436
Jordan Croused8e96522017-02-13 10:14:16 -07002437 mmu = sde_kms->aspace[i]->mmu;
2438
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002439 mmu->funcs->detach(mmu, (const char **)iommu_ports,
2440 ARRAY_SIZE(iommu_ports));
Jordan Crouse12bf3622017-02-13 10:14:11 -07002441 msm_gem_address_space_destroy(sde_kms->aspace[i]);
2442
Jordan Croused8e96522017-02-13 10:14:16 -07002443 sde_kms->aspace[i] = NULL;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002444 }
2445
2446 return 0;
2447}
2448
/*
 * _sde_kms_mmu_init - create, wrap and attach an smmu for every domain
 * @sde_kms: pointer to sde kms structure
 *
 * For each MSM_SMMU_DOMAIN_* index: creates the smmu, wraps it in a GEM
 * address space stored in sde_kms->aspace[i], and attaches it. A domain
 * whose smmu cannot be created is skipped (debug only); any later
 * failure tears down everything built so far via _sde_kms_mmu_destroy.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			/* missing domains are tolerated, not fatal */
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
			mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			/* aspace does not own mmu yet; destroy it directly */
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;

		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
			msm_gem_address_space_destroy(aspace);
			goto fail;
		}
		aspace->domain_attached = true;
	}

	return 0;
fail:
	/* unwind every domain initialized before the failure */
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}
2491
/* the caller api needs to turn on clock before calling this function */
/*
 * _sde_kms_cont_splash_res_init - read back the hw resources the
 *	bootloader programmed for continuous splash
 * @sde_kms: pointer to sde kms structure
 *
 * Scans every CTL's top block; a CTL with a non-zero intf_sel was in use
 * by the bootloader, so its layer mixers are recorded, its id is added
 * to splash_data->ctl_ids, and cont_splash_en is set. DSC blocks are
 * read only when at least one active CTL was found.
 *
 * Returns 0 on success, -EINVAL on invalid kms/catalog or array size
 * mismatch.
 */
static int _sde_kms_cont_splash_res_init(struct sde_kms *sde_kms)
{
	struct sde_mdss_cfg *cat;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_splash_data *splash_data;
	int i;
	int ctl_top_cnt;

	if (!sde_kms || !sde_kms->catalog) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}
	cat = sde_kms->catalog;
	dev = sde_kms->dev;
	priv = dev->dev_private;
	splash_data = &sde_kms->splash_data;
	SDE_DEBUG("mixer_count=%d, ctl_count=%d, dsc_count=%d\n",
			cat->mixer_count,
			cat->ctl_count,
			cat->dsc_count);

	ctl_top_cnt = cat->ctl_count;

	if (ctl_top_cnt > ARRAY_SIZE(splash_data->top)) {
		SDE_ERROR("Mismatch in ctl_top array size\n");
		return -EINVAL;
	}
	for (i = 0; i < ctl_top_cnt; i++) {
		sde_get_ctl_top_for_cont_splash(sde_kms->mmio,
				&splash_data->top[i], i);
		/* non-zero intf_sel means the bootloader used this CTL */
		if (splash_data->top[i].intf_sel) {
			splash_data->lm_cnt +=
				sde_get_ctl_lm_for_cont_splash
					(sde_kms->mmio,
					sde_kms->catalog->mixer_count,
					splash_data->lm_cnt,
					splash_data->lm_ids,
					&splash_data->top[i], i);
			splash_data->ctl_ids[splash_data->ctl_top_cnt]
				= i + CTL_0;
			splash_data->ctl_top_cnt++;
			sde_kms->cont_splash_en = true;
		}
	}

	/* Skip DSC blk reads if cont_splash is disabled */
	if (!sde_kms->cont_splash_en)
		return 0;

	splash_data->dsc_cnt =
		sde_get_pp_dsc_for_cont_splash(sde_kms->mmio,
				sde_kms->catalog->dsc_count,
				splash_data->dsc_ids);
	SDE_DEBUG("splash_data: ctl_top_cnt=%d, lm_cnt=%d, dsc_cnt=%d\n",
			splash_data->ctl_top_cnt, splash_data->lm_cnt,
			splash_data->dsc_cnt);

	return 0;
}
2553
Clarence Ip7f0de632017-05-31 14:59:14 -04002554static void sde_kms_handle_power_event(u32 event_type, void *usr)
2555{
2556 struct sde_kms *sde_kms = usr;
Harsh Sahu08a4a742017-09-18 11:42:39 -07002557 struct msm_kms *msm_kms;
Clarence Ip7f0de632017-05-31 14:59:14 -04002558
Harsh Sahu08a4a742017-09-18 11:42:39 -07002559 msm_kms = &sde_kms->base;
Clarence Ip7f0de632017-05-31 14:59:14 -04002560 if (!sde_kms)
2561 return;
2562
Harsh Sahu08a4a742017-09-18 11:42:39 -07002563 SDE_DEBUG("event_type:%d\n", event_type);
2564 SDE_EVT32_VERBOSE(event_type);
2565
2566 if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
2567 sde_irq_update(msm_kms, true);
Clarence Ip7f0de632017-05-31 14:59:14 -04002568 sde_vbif_init_memtypes(sde_kms);
Harsh Sahu08a4a742017-09-18 11:42:39 -07002569 } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
2570 sde_irq_update(msm_kms, false);
2571 }
Clarence Ip7f0de632017-05-31 14:59:14 -04002572}
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002573
Alan Kwong23afc2d92017-09-15 10:59:06 -04002574#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)
2575
2576static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
2577{
2578 struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
2579 struct drm_device *dev;
2580 struct msm_drm_private *priv;
2581 int rc;
2582
2583 SDE_DEBUG("\n");
2584
2585 dev = sde_kms->dev;
2586 if (!dev)
2587 return -EINVAL;
2588
2589 priv = dev->dev_private;
2590 if (!priv)
2591 return -EINVAL;
2592
2593 SDE_EVT32(genpd->device_count);
2594
2595 rc = sde_power_resource_enable(&priv->phandle, priv->pclient, true);
2596
2597 return rc;
2598}
2599
2600static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
2601{
2602 struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
2603 struct drm_device *dev;
2604 struct msm_drm_private *priv;
2605 int rc;
2606
2607 SDE_DEBUG("\n");
2608
2609 dev = sde_kms->dev;
2610 if (!dev)
2611 return -EINVAL;
2612
2613 priv = dev->dev_private;
2614 if (!priv)
2615 return -EINVAL;
2616
2617 SDE_EVT32(genpd->device_count);
2618
2619 rc = sde_power_resource_enable(&priv->phandle, priv->pclient, false);
2620
2621 return rc;
2622}
2623
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07002624static int _sde_kms_get_splash_data(struct sde_splash_data *data)
2625{
2626 int ret = 0;
2627 struct device_node *parent, *node;
2628 struct resource r;
2629
2630 if (!data)
2631 return -EINVAL;
2632
2633 parent = of_find_node_by_path("/reserved-memory");
2634 if (!parent) {
2635 SDE_ERROR("failed to find reserved-memory node\n");
2636 return -EINVAL;
2637 }
2638
2639 node = of_find_node_by_name(parent, "cont_splash_region");
2640 if (!node) {
2641 SDE_ERROR("failed to find splash memory reservation\n");
2642 return -EINVAL;
2643 }
2644
2645 if (of_address_to_resource(node, 0, &r)) {
2646 SDE_ERROR("failed to find data for splash memory\n");
2647 return -EINVAL;
2648 }
2649
2650 data->splash_base = (unsigned long)r.start;
2651 data->splash_size = (r.end - r.start) + 1;
2652
2653 pr_info("found continuous splash base address:%lx size:%x\n",
2654 data->splash_base,
2655 data->splash_size);
2656 data->smmu_handoff_pending = true;
2657
2658 return ret;
2659}
2660
Clarence Ip17162b52016-11-24 17:06:29 -05002661static int sde_kms_hw_init(struct msm_kms *kms)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002662{
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002663 struct sde_kms *sde_kms;
Clarence Ip17162b52016-11-24 17:06:29 -05002664 struct drm_device *dev;
Dhaval Patel3949f032016-06-20 16:24:33 -07002665 struct msm_drm_private *priv;
Vara Reddyc90c7fe2017-11-10 17:02:02 -08002666 bool splash_mem_found = false;
Clarence Ip17162b52016-11-24 17:06:29 -05002667 int i, rc = -EINVAL;
Dhaval Patel3949f032016-06-20 16:24:33 -07002668
Clarence Ip17162b52016-11-24 17:06:29 -05002669 if (!kms) {
2670 SDE_ERROR("invalid kms\n");
2671 goto end;
2672 }
2673
2674 sde_kms = to_sde_kms(kms);
2675 dev = sde_kms->dev;
2676 if (!dev || !dev->platformdev) {
2677 SDE_ERROR("invalid device\n");
Dhaval Patel3949f032016-06-20 16:24:33 -07002678 goto end;
2679 }
2680
2681 priv = dev->dev_private;
Clarence Ip17162b52016-11-24 17:06:29 -05002682 if (!priv) {
2683 SDE_ERROR("invalid private data\n");
Dhaval Patel3949f032016-06-20 16:24:33 -07002684 goto end;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002685 }
2686
Dhaval Patela2430842017-06-15 14:32:36 -07002687 sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "mdp_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002688 if (IS_ERR(sde_kms->mmio)) {
2689 rc = PTR_ERR(sde_kms->mmio);
2690 SDE_ERROR("mdp register memory map failed: %d\n", rc);
2691 sde_kms->mmio = NULL;
2692 goto error;
2693 }
2694 DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
Dhaval Patela2430842017-06-15 14:32:36 -07002695 sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002696
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002697 rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
2698 sde_kms->mmio_len);
2699 if (rc)
2700 SDE_ERROR("dbg base register kms failed: %d\n", rc);
2701
Dhaval Patela2430842017-06-15 14:32:36 -07002702 sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, "vbif_phys",
2703 "vbif_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002704 if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
2705 rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
2706 SDE_ERROR("vbif register memory map failed: %d\n", rc);
2707 sde_kms->vbif[VBIF_RT] = NULL;
2708 goto error;
2709 }
Dhaval Patela2430842017-06-15 14:32:36 -07002710 sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(dev->platformdev,
2711 "vbif_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002712 rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
2713 sde_kms->vbif_len[VBIF_RT]);
2714 if (rc)
2715 SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
2716
Dhaval Patela2430842017-06-15 14:32:36 -07002717 sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, "vbif_nrt_phys",
2718 "vbif_nrt_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002719 if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
2720 sde_kms->vbif[VBIF_NRT] = NULL;
2721 SDE_DEBUG("VBIF NRT is not defined");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002722 } else {
Dhaval Patela2430842017-06-15 14:32:36 -07002723 sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(dev->platformdev,
2724 "vbif_nrt_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002725 rc = sde_dbg_reg_register_base("vbif_nrt",
2726 sde_kms->vbif[VBIF_NRT],
2727 sde_kms->vbif_len[VBIF_NRT]);
2728 if (rc)
2729 SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
2730 rc);
Clarence Ip17162b52016-11-24 17:06:29 -05002731 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002732
Dhaval Patela2430842017-06-15 14:32:36 -07002733 sde_kms->reg_dma = msm_ioremap(dev->platformdev, "regdma_phys",
2734 "regdma_phys");
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08002735 if (IS_ERR(sde_kms->reg_dma)) {
2736 sde_kms->reg_dma = NULL;
2737 SDE_DEBUG("REG_DMA is not defined");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002738 } else {
Dhaval Patela2430842017-06-15 14:32:36 -07002739 sde_kms->reg_dma_len = msm_iomap_size(dev->platformdev,
2740 "regdma_phys");
Gopikrishnaiah Anandanbc5aa792017-08-23 18:30:08 -07002741 rc = sde_dbg_reg_register_base("reg_dma",
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002742 sde_kms->reg_dma,
2743 sde_kms->reg_dma_len);
2744 if (rc)
2745 SDE_ERROR("dbg base register reg_dma failed: %d\n",
2746 rc);
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08002747 }
2748
Dhaval Patel3949f032016-06-20 16:24:33 -07002749 sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
2750 if (IS_ERR_OR_NULL(sde_kms->core_client)) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002751 rc = PTR_ERR(sde_kms->core_client);
Dhaval Patel5398f602017-03-25 18:25:18 -07002752 if (!sde_kms->core_client)
2753 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002754 SDE_ERROR("sde power client create failed: %d\n", rc);
2755 sde_kms->core_client = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002756 goto error;
Dhaval Patel3949f032016-06-20 16:24:33 -07002757 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002758
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07002759 rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
2760 if (rc) {
Vara Reddyc90c7fe2017-11-10 17:02:02 -08002761 SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
2762 splash_mem_found = false;
2763 } else {
2764 splash_mem_found = true;
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07002765 }
2766
Dhaval Patel3949f032016-06-20 16:24:33 -07002767 rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
2768 true);
2769 if (rc) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002770 SDE_ERROR("resource enable failed: %d\n", rc);
Clarence Ip17162b52016-11-24 17:06:29 -05002771 goto error;
Dhaval Patel3949f032016-06-20 16:24:33 -07002772 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002773
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002774 _sde_kms_core_hw_rev_init(sde_kms);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04002775
Dhaval Patelb271b842016-10-19 21:41:22 -07002776 pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
2777
Dhaval Patel8bf7ff32016-07-20 18:13:24 -07002778 sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
Dhaval Patel3949f032016-06-20 16:24:33 -07002779 if (IS_ERR_OR_NULL(sde_kms->catalog)) {
Dhaval Patel3949f032016-06-20 16:24:33 -07002780 rc = PTR_ERR(sde_kms->catalog);
Dhaval Patel5398f602017-03-25 18:25:18 -07002781 if (!sde_kms->catalog)
2782 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002783 SDE_ERROR("catalog init failed: %d\n", rc);
2784 sde_kms->catalog = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002785 goto power_error;
Dhaval Patel3949f032016-06-20 16:24:33 -07002786 }
2787
Lloyd Atkinson274cc462017-02-21 11:52:06 -05002788 sde_dbg_init_dbg_buses(sde_kms->core_rev);
2789
Vara Reddyc90c7fe2017-11-10 17:02:02 -08002790 /*
2791 * Attempt continuous splash handoff only if reserved
2792 * splash memory is found.
2793 */
2794 if (splash_mem_found)
2795 _sde_kms_cont_splash_res_init(sde_kms);
Gopikrishnaiah Anandane69dc592017-03-29 14:00:55 -07002796
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08002797 /* Initialize reg dma block which is a singleton */
2798 rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
2799 sde_kms->dev);
2800 if (rc) {
2801 SDE_ERROR("failed: reg dma init failed\n");
2802 goto power_error;
2803 }
2804
Chandan Uddaraju9bb109a2017-10-29 18:08:51 -07002805 rc = _sde_kms_mmu_init(sde_kms);
2806 if (rc) {
2807 SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
2808 goto power_error;
2809 }
2810
Dhaval Patel3949f032016-06-20 16:24:33 -07002811 rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
Lloyd Atkinson11f34442016-08-11 11:19:52 -04002812 sde_kms->dev);
Clarence Ip17162b52016-11-24 17:06:29 -05002813 if (rc) {
2814 SDE_ERROR("rm init failed: %d\n", rc);
2815 goto power_error;
2816 }
2817
2818 sde_kms->rm_init = true;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002819
2820 sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
2821 if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
2822 rc = PTR_ERR(sde_kms->hw_mdp);
Dhaval Patel5398f602017-03-25 18:25:18 -07002823 if (!sde_kms->hw_mdp)
2824 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002825 SDE_ERROR("failed to get hw_mdp: %d\n", rc);
2826 sde_kms->hw_mdp = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002827 goto power_error;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002828 }
Dhaval Patel3949f032016-06-20 16:24:33 -07002829
Alan Kwong5d324e42016-07-28 22:56:18 -04002830 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
2831 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
2832
2833 sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
2834 sde_kms->vbif[vbif_idx], sde_kms->catalog);
2835 if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002836 rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
Dhaval Patel5398f602017-03-25 18:25:18 -07002837 if (!sde_kms->hw_vbif[vbif_idx])
2838 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002839 SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
Alan Kwong5d324e42016-07-28 22:56:18 -04002840 sde_kms->hw_vbif[vbif_idx] = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002841 goto power_error;
Alan Kwong5d324e42016-07-28 22:56:18 -04002842 }
2843 }
2844
Alan Kwong54125bb2017-02-26 16:01:36 -08002845 sde_kms->iclient = msm_ion_client_create(dev->unique);
2846 if (IS_ERR(sde_kms->iclient)) {
2847 rc = PTR_ERR(sde_kms->iclient);
2848 SDE_DEBUG("msm_ion_client not available: %d\n", rc);
2849 sde_kms->iclient = NULL;
2850 }
2851
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002852
Alan Kwong67a3f792016-11-01 23:16:53 -04002853 rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
Dhaval Patel446446e2017-04-21 19:38:17 -07002854 &priv->phandle, priv->pclient, "core_clk");
Alan Kwong67a3f792016-11-01 23:16:53 -04002855 if (rc) {
2856 SDE_ERROR("failed to init perf %d\n", rc);
2857 goto perf_err;
2858 }
2859
Abhinav Kumar2316fb92017-01-30 23:07:08 -08002860 sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
2861 if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
2862 rc = PTR_ERR(sde_kms->hw_intr);
2863 SDE_ERROR("hw_intr init failed: %d\n", rc);
2864 sde_kms->hw_intr = NULL;
2865 goto hw_intr_init_err;
2866 }
2867
Clarence Ip4ce59322016-06-26 22:27:51 -04002868 /*
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002869 * _sde_kms_drm_obj_init should create the DRM related objects
2870 * i.e. CRTCs, planes, encoders, connectors and so forth
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002871 */
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002872 rc = _sde_kms_drm_obj_init(sde_kms);
2873 if (rc) {
2874 SDE_ERROR("modeset init failed: %d\n", rc);
Alan Kwong67a3f792016-11-01 23:16:53 -04002875 goto drm_obj_init_err;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002876 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002877
2878 dev->mode_config.min_width = 0;
2879 dev->mode_config.min_height = 0;
2880
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002881 /*
Dhaval Patel4e574842016-08-23 15:11:37 -07002882 * max crtc width is equal to the max mixer width * 2 and max height is
2883 * is 4K
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002884 */
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002885 dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002886 dev->mode_config.max_height = 4096;
2887
Lloyd Atkinsonfa2489c2016-05-25 15:16:03 -04002888 /*
2889 * Support format modifiers for compression etc.
2890 */
2891 dev->mode_config.allow_fb_modifiers = true;
2892
Clarence Ip7f0de632017-05-31 14:59:14 -04002893 /*
2894 * Handle (re)initializations during power enable
2895 */
2896 sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
2897 sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
Harsh Sahu08a4a742017-09-18 11:42:39 -07002898 SDE_POWER_EVENT_POST_ENABLE |
2899 SDE_POWER_EVENT_PRE_DISABLE,
Clarence Ip7f0de632017-05-31 14:59:14 -04002900 sde_kms_handle_power_event, sde_kms, "kms");
2901
Alan Kwong23afc2d92017-09-15 10:59:06 -04002902 /* initialize power domain if defined */
2903 if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
2904 sde_kms->genpd.name = dev->unique;
2905 sde_kms->genpd.power_off = sde_kms_pd_disable;
2906 sde_kms->genpd.power_on = sde_kms_pd_enable;
2907
2908 rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
2909 if (rc < 0) {
2910 SDE_ERROR("failed to init genpd provider %s: %d\n",
2911 sde_kms->genpd.name, rc);
2912 goto genpd_err;
2913 }
2914
2915 rc = of_genpd_add_provider_simple(dev->dev->of_node,
2916 &sde_kms->genpd);
2917 if (rc < 0) {
2918 SDE_ERROR("failed to add genpd provider %s: %d\n",
2919 sde_kms->genpd.name, rc);
2920 pm_genpd_remove(&sde_kms->genpd);
2921 goto genpd_err;
2922 }
2923
2924 sde_kms->genpd_init = true;
2925 SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
2926 }
2927
Chandan Uddaraju18f09402017-09-29 11:54:29 -07002928 if (sde_kms->cont_splash_en)
2929 SDE_DEBUG("Skipping MDP Resources disable\n");
2930 else
2931 sde_power_resource_enable(&priv->phandle,
2932 sde_kms->core_client, false);
Alan Kwong23afc2d92017-09-15 10:59:06 -04002933
Clarence Ip17162b52016-11-24 17:06:29 -05002934 return 0;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002935
Alan Kwong23afc2d92017-09-15 10:59:06 -04002936genpd_err:
Alan Kwong67a3f792016-11-01 23:16:53 -04002937drm_obj_init_err:
2938 sde_core_perf_destroy(&sde_kms->perf);
Abhinav Kumar2316fb92017-01-30 23:07:08 -08002939hw_intr_init_err:
Alan Kwong67a3f792016-11-01 23:16:53 -04002940perf_err:
Clarence Ip17162b52016-11-24 17:06:29 -05002941power_error:
Dhaval Patel3949f032016-06-20 16:24:33 -07002942 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
Clarence Ip17162b52016-11-24 17:06:29 -05002943error:
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002944 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
Dhaval Patel3949f032016-06-20 16:24:33 -07002945end:
Clarence Ip17162b52016-11-24 17:06:29 -05002946 return rc;
2947}
2948
2949struct msm_kms *sde_kms_init(struct drm_device *dev)
2950{
2951 struct msm_drm_private *priv;
2952 struct sde_kms *sde_kms;
2953
2954 if (!dev || !dev->dev_private) {
2955 SDE_ERROR("drm device node invalid\n");
2956 return ERR_PTR(-EINVAL);
2957 }
2958
2959 priv = dev->dev_private;
2960
2961 sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
2962 if (!sde_kms) {
2963 SDE_ERROR("failed to allocate sde kms\n");
2964 return ERR_PTR(-ENOMEM);
2965 }
2966
2967 msm_kms_init(&sde_kms->base, &kms_funcs);
2968 sde_kms->dev = dev;
2969
2970 return &sde_kms->base;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002971}
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002972
2973static int _sde_kms_register_events(struct msm_kms *kms,
2974 struct drm_mode_object *obj, u32 event, bool en)
2975{
2976 int ret = 0;
2977 struct drm_crtc *crtc = NULL;
2978 struct drm_connector *conn = NULL;
2979 struct sde_kms *sde_kms = NULL;
2980
2981 if (!kms || !obj) {
2982 SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
2983 return -EINVAL;
2984 }
2985
2986 sde_kms = to_sde_kms(kms);
2987 switch (obj->type) {
2988 case DRM_MODE_OBJECT_CRTC:
2989 crtc = obj_to_crtc(obj);
2990 ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
2991 break;
2992 case DRM_MODE_OBJECT_CONNECTOR:
2993 conn = obj_to_connector(obj);
2994 ret = sde_connector_register_custom_event(sde_kms, conn, event,
2995 en);
2996 break;
2997 }
2998
2999 return ret;
3000}
Sandeep Panda11b20d82017-06-19 12:57:27 +05303001
3002int sde_kms_handle_recovery(struct drm_encoder *encoder)
3003{
3004 SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
3005 return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
3006}