blob: 9e80e39f565d11e4083f47357b21fa01e6879fe0 [file] [log] [blame]
Dhaval Patel14d46ce2017-01-17 16:28:12 -08001/*
2 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07005 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -08006 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07009 *
Dhaval Patel14d46ce2017-01-17 16:28:12 -080010 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070017 */
18
Alan Kwong5d324e42016-07-28 22:56:18 -040019#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
20
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070021#include <drm/drm_crtc.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040022#include <linux/debugfs.h>
Dhaval Patel04c7e8e2016-09-26 20:14:31 -070023#include <linux/of_irq.h>
Alan Kwong4dd64c82017-02-04 18:41:51 -080024#include <linux/dma-buf.h>
Clarence Ip4ce59322016-06-26 22:27:51 -040025
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070026#include "msm_drv.h"
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -040027#include "msm_mmu.h"
Clarence Ipd02440b2017-05-21 18:10:01 -040028#include "msm_gem.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040029
30#include "dsi_display.h"
31#include "dsi_drm.h"
32#include "sde_wb.h"
Padmanabhan Komanduru63758612017-05-23 01:47:18 -070033#include "dp_display.h"
34#include "dp_drm.h"
Clarence Ip3649f8b2016-10-31 09:59:44 -040035
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070036#include "sde_kms.h"
Alan Kwongf5dd86c2016-08-09 18:08:17 -040037#include "sde_core_irq.h"
Clarence Ip4ce59322016-06-26 22:27:51 -040038#include "sde_formats.h"
Alan Kwong5d324e42016-07-28 22:56:18 -040039#include "sde_hw_vbif.h"
Alan Kwong83285fb2016-10-21 20:51:17 -040040#include "sde_vbif.h"
41#include "sde_encoder.h"
42#include "sde_plane.h"
43#include "sde_crtc.h"
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -080044#include "sde_reg_dma.h"
Narendra Muppalla1b0b3352015-09-29 10:16:51 -070045
Alan Kwong1a00e4d2016-07-18 09:42:30 -040046#define CREATE_TRACE_POINTS
47#include "sde_trace.h"
48
/* IOMMU port names requested when attaching the display MMU context bank */
static const char * const iommu_ports[] = {
		"mdp_0",
};
52
Clarence Ip4ce59322016-06-26 22:27:51 -040053/**
54 * Controls size of event log buffer. Specified as a power of 2.
55 */
56#define SDE_EVTLOG_SIZE 1024
57
58/*
59 * To enable overall DRM driver logging
60 * # echo 0x2 > /sys/module/drm/parameters/debug
61 *
62 * To enable DRM driver h/w logging
Dhaval Patel6c666622017-03-21 23:02:59 -070063 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
Clarence Ip4ce59322016-06-26 22:27:51 -040064 *
65 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
66 */
67#define SDE_DEBUGFS_DIR "msm_sde"
68#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
69
Clarence Ipdd395242016-09-09 10:47:17 -040070/**
71 * sdecustom - enable certain driver customizations for sde clients
72 * Enabling this modifies the standard DRM behavior slightly and assumes
73 * that the clients have specific knowledge about the modifications that
74 * are involved, so don't enable this unless you know what you're doing.
75 *
76 * Parts of the driver that are affected by this setting may be located by
77 * searching for invocations of the 'sde_is_custom_client()' function.
78 *
79 * This is disabled by default.
80 */
/* see block comment above: opt-in driver customizations for sde clients */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400); /* 0400: read-only after boot */
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
84
/* forward declarations for helpers defined later in this file */
static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);
Clarence Ipdd395242016-09-09 10:47:17 -040089bool sde_is_custom_client(void)
90{
91 return sdecustom;
92}
93
Alan Kwongf0fd8512016-10-24 21:39:26 -040094#ifdef CONFIG_DEBUG_FS
95static int _sde_danger_signal_status(struct seq_file *s,
96 bool danger_status)
97{
98 struct sde_kms *kms = (struct sde_kms *)s->private;
99 struct msm_drm_private *priv;
100 struct sde_danger_safe_status status;
101 int i;
102
103 if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
104 SDE_ERROR("invalid arg(s)\n");
105 return 0;
106 }
107
108 priv = kms->dev->dev_private;
109 memset(&status, 0, sizeof(struct sde_danger_safe_status));
110
111 sde_power_resource_enable(&priv->phandle, kms->core_client, true);
112 if (danger_status) {
113 seq_puts(s, "\nDanger signal status:\n");
114 if (kms->hw_mdp->ops.get_danger_status)
115 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
116 &status);
117 } else {
118 seq_puts(s, "\nSafe signal status:\n");
119 if (kms->hw_mdp->ops.get_danger_status)
120 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
121 &status);
122 }
123 sde_power_resource_enable(&priv->phandle, kms->core_client, false);
124
125 seq_printf(s, "MDP : 0x%x\n", status.mdp);
126
127 for (i = SSPP_VIG0; i < SSPP_MAX; i++)
128 seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
129 status.sspp[i]);
130 seq_puts(s, "\n");
131
132 for (i = WB_0; i < WB_MAX; i++)
133 seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
134 status.wb[i]);
135 seq_puts(s, "\n");
136
137 return 0;
138}
139
/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate a single_open() wrapper and a
 * file_operations table for an existing __prefix ## _show() seq handler.
 */
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}
152
/* seq_file show handler for the "danger_status" debugfs node */
static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, true);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
158
/* seq_file show handler for the "safe_status" debugfs node */
static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, false);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
164
/* tear down the "danger" debugfs subtree; safe to call when already NULL */
static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
{
	/* debugfs_remove_recursive() tolerates a NULL dentry */
	debugfs_remove_recursive(sde_kms->debugfs_danger);
	sde_kms->debugfs_danger = NULL;
}
170
/*
 * Create the "danger" debugfs directory with danger_status/safe_status
 * nodes under @parent. Returns 0 on success, -EINVAL if the directory
 * cannot be created.
 */
static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
		struct dentry *parent)
{
	sde_kms->debugfs_danger = debugfs_create_dir("danger",
			parent);
	if (!sde_kms->debugfs_danger) {
		SDE_ERROR("failed to create danger debugfs\n");
		return -EINVAL;
	}

	/* 0600: root-only read/write; handlers defined via seq_fops macro */
	debugfs_create_file("danger_status", 0600, sde_kms->debugfs_danger,
			sde_kms, &sde_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, sde_kms->debugfs_danger,
			sde_kms, &sde_debugfs_safe_stats_fops);

	return 0;
}
188
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -0400189static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
Clarence Ip4ce59322016-06-26 22:27:51 -0400190{
Clarence Ipaac9f332016-08-31 15:46:35 -0400191 struct sde_debugfs_regset32 *regset;
192 struct sde_kms *sde_kms;
193 struct drm_device *dev;
194 struct msm_drm_private *priv;
Clarence Ip4ce59322016-06-26 22:27:51 -0400195 void __iomem *base;
Clarence Ipaac9f332016-08-31 15:46:35 -0400196 uint32_t i, addr;
Clarence Ip4ce59322016-06-26 22:27:51 -0400197
Clarence Ipaac9f332016-08-31 15:46:35 -0400198 if (!s || !s->private)
199 return 0;
Clarence Ip4ce59322016-06-26 22:27:51 -0400200
Clarence Ipaac9f332016-08-31 15:46:35 -0400201 regset = s->private;
202
203 sde_kms = regset->sde_kms;
204 if (!sde_kms || !sde_kms->mmio)
205 return 0;
206
207 dev = sde_kms->dev;
208 if (!dev)
209 return 0;
210
211 priv = dev->dev_private;
212 if (!priv)
213 return 0;
214
215 base = sde_kms->mmio + regset->offset;
216
217 /* insert padding spaces, if needed */
218 if (regset->offset & 0xF) {
219 seq_printf(s, "[%x]", regset->offset & ~0xF);
220 for (i = 0; i < (regset->offset & 0xF); i += 4)
221 seq_puts(s, " ");
222 }
223
224 if (sde_power_resource_enable(&priv->phandle,
225 sde_kms->core_client, true)) {
226 seq_puts(s, "failed to enable sde clocks\n");
227 return 0;
228 }
229
230 /* main register output */
231 for (i = 0; i < regset->blk_len; i += 4) {
232 addr = regset->offset + i;
233 if ((addr & 0xF) == 0x0)
234 seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
235 seq_printf(s, " %08x", readl_relaxed(base + i));
236 }
237 seq_puts(s, "\n");
238 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
Clarence Ip4ce59322016-06-26 22:27:51 -0400239
240 return 0;
241}
242
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -0400243static int sde_debugfs_open_regset32(struct inode *inode,
244 struct file *file)
Clarence Ip4ce59322016-06-26 22:27:51 -0400245{
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -0400246 return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
Clarence Ip4ce59322016-06-26 22:27:51 -0400247}
248
/* file_operations for register-range dump nodes (read-only seq_file) */
static const struct file_operations sde_fops_regset32 = {
	.open = sde_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
255
256void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
Clarence Ipaac9f332016-08-31 15:46:35 -0400257 uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
Clarence Ip4ce59322016-06-26 22:27:51 -0400258{
259 if (regset) {
260 regset->offset = offset;
261 regset->blk_len = length;
Clarence Ipaac9f332016-08-31 15:46:35 -0400262 regset->sde_kms = sde_kms;
Clarence Ip4ce59322016-06-26 22:27:51 -0400263 }
264}
265
266void *sde_debugfs_create_regset32(const char *name, umode_t mode,
267 void *parent, struct sde_debugfs_regset32 *regset)
268{
Clarence Ipaac9f332016-08-31 15:46:35 -0400269 if (!name || !regset || !regset->sde_kms || !regset->blk_len)
Clarence Ip4ce59322016-06-26 22:27:51 -0400270 return NULL;
271
272 /* make sure offset is a multiple of 4 */
273 regset->offset = round_down(regset->offset, 4);
274
275 return debugfs_create_file(name, mode, parent,
276 regset, &sde_fops_regset32);
277}
278
279void *sde_debugfs_get_root(struct sde_kms *sde_kms)
280{
Dhaval Patel6c666622017-03-21 23:02:59 -0700281 struct msm_drm_private *priv;
282
283 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
284 return NULL;
285
286 priv = sde_kms->dev->dev_private;
287 return priv->debug_root;
Clarence Ip4ce59322016-06-26 22:27:51 -0400288}
289
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -0400290static int _sde_debugfs_init(struct sde_kms *sde_kms)
Clarence Ip4ce59322016-06-26 22:27:51 -0400291{
292 void *p;
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -0700293 int rc;
294 void *debugfs_root;
Clarence Ip4ce59322016-06-26 22:27:51 -0400295
296 p = sde_hw_util_get_log_mask_ptr();
297
298 if (!sde_kms || !p)
299 return -EINVAL;
300
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -0700301 debugfs_root = sde_debugfs_get_root(sde_kms);
302 if (!debugfs_root)
303 return -EINVAL;
Clarence Ip4ce59322016-06-26 22:27:51 -0400304
305 /* allow debugfs_root to be NULL */
Lloyd Atkinson8de415a2017-05-23 11:31:16 -0400306 debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);
Alan Kwongcd1c09f2016-11-04 20:37:30 -0400307
Lloyd Atkinson09e64bf2017-04-13 14:09:59 -0700308 (void) sde_debugfs_danger_init(sde_kms, debugfs_root);
309 (void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
310 (void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);
Alan Kwongcd1c09f2016-11-04 20:37:30 -0400311
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -0700312 rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
313 if (rc) {
314 SDE_ERROR("failed to init perf %d\n", rc);
315 return rc;
316 }
Alan Kwongf0fd8512016-10-24 21:39:26 -0400317
Clarence Ip4ce59322016-06-26 22:27:51 -0400318 return 0;
319}
320
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -0400321static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
Clarence Ip4ce59322016-06-26 22:27:51 -0400322{
323 /* don't need to NULL check debugfs_root */
324 if (sde_kms) {
Alan Kwong748e833d2016-10-26 12:34:48 -0400325 sde_debugfs_vbif_destroy(sde_kms);
Alan Kwongf0fd8512016-10-24 21:39:26 -0400326 sde_debugfs_danger_destroy(sde_kms);
Lloyd Atkinson09e64bf2017-04-13 14:09:59 -0700327 sde_debugfs_core_irq_destroy(sde_kms);
Clarence Ip4ce59322016-06-26 22:27:51 -0400328 }
329}
Alan Kwongf0fd8512016-10-24 21:39:26 -0400330#else
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -0700331static int _sde_debugfs_init(struct sde_kms *sde_kms)
332{
333 return 0;
334}
335
336static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
337{
Alan Kwongf0fd8512016-10-24 21:39:26 -0400338}
339#endif
Clarence Ip4ce59322016-06-26 22:27:51 -0400340
Alan Kwongf5dd86c2016-08-09 18:08:17 -0400341static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
342{
343 return sde_crtc_vblank(crtc, true);
344}
345
346static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
347{
348 sde_crtc_vblank(crtc, false);
349}
350
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700351static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
Jeykumar Sankarandfaeec92017-06-06 15:21:51 -0700352 struct drm_crtc *crtc)
353{
354 struct drm_encoder *encoder;
355 struct drm_device *dev;
356 int ret;
357
358 if (!kms || !crtc || !crtc->state || !crtc->dev) {
359 SDE_ERROR("invalid params\n");
360 return;
361 }
362
363 if (!crtc->state->enable) {
364 SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
365 return;
366 }
367
368 if (!crtc->state->active) {
369 SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
370 return;
371 }
372
373 dev = crtc->dev;
374
375 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
376 if (encoder->crtc != crtc)
377 continue;
378 /*
379 * Video Mode - Wait for VSYNC
380 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
381 * complete
382 */
383 SDE_EVT32_VERBOSE(DRMID(crtc));
384 ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
385 if (ret && ret != -EWOULDBLOCK) {
386 SDE_ERROR(
387 "[crtc: %d][enc: %d] wait for commit done returned %d\n",
388 crtc->base.id, encoder->base.id, ret);
389 break;
390 }
391 }
392}
393
Abhijit Kulkarni1b3340c2017-06-22 12:39:37 -0700394static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
395 struct drm_atomic_state *state)
396{
397 struct drm_crtc *crtc;
398 struct drm_crtc_state *old_crtc_state;
399
400 struct drm_plane *plane;
401 struct drm_plane_state *plane_state;
402 struct sde_kms *sde_kms = to_sde_kms(kms);
403 struct drm_device *dev = sde_kms->dev;
404 int i, ops = 0, ret = 0;
405 bool old_valid_fb = false;
406
407 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
408 if (!crtc->state || !crtc->state->active)
409 continue;
410 /*
411 * It is safe to assume only one active crtc,
412 * and compatible translation modes on the
413 * planes staged on this crtc.
414 * otherwise validation would have failed.
415 * For this CRTC,
416 */
417
418 /*
419 * 1. Check if old state on the CRTC has planes
420 * staged with valid fbs
421 */
422 for_each_plane_in_state(state, plane, plane_state, i) {
423 if (!plane_state->crtc)
424 continue;
425 if (plane_state->fb) {
426 old_valid_fb = true;
427 break;
428 }
429 }
430
431 /*
432 * 2.Get the operations needed to be performed before
433 * secure transition can be initiated.
434 */
435 ops = sde_crtc_get_secure_transition_ops(crtc,
436 old_crtc_state,
437 old_valid_fb);
438 if (ops < 0) {
439 SDE_ERROR("invalid secure operations %x\n", ops);
440 return ops;
441 }
442
443 if (!ops)
444 goto no_ops;
445
446 SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
447 crtc->base.id,
448 ops,
449 crtc->state);
450
451 /* 3. Perform operations needed for secure transition */
452 if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
453 SDE_DEBUG("wait_for_transfer_done\n");
454 sde_kms_wait_for_frame_transfer_complete(kms, crtc);
455 }
456 if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
457 SDE_DEBUG("cleanup planes\n");
458 drm_atomic_helper_cleanup_planes(dev, state);
459 }
460 if (ops & SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE) {
461 SDE_DEBUG("secure ctrl\n");
462 sde_crtc_secure_ctrl(crtc, false);
463 }
464 if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
465 SDE_DEBUG("prepare planes %d",
466 crtc->state->plane_mask);
467 drm_atomic_crtc_for_each_plane(plane,
468 crtc) {
469 const struct drm_plane_helper_funcs *funcs;
470
471 plane_state = plane->state;
472 funcs = plane->helper_private;
473
474 SDE_DEBUG("psde:%d FB[%u]\n",
475 plane->base.id,
476 plane->fb->base.id);
477 if (!funcs)
478 continue;
479
480 if (funcs->prepare_fb(plane, plane_state)) {
481 ret = funcs->prepare_fb(plane,
482 plane_state);
483 if (ret)
484 return ret;
485 }
486 }
487 }
488 SDE_DEBUG("secure operations completed\n");
489 }
490
491no_ops:
492 return 0;
493}
494
/*
 * msm_kms prepare_commit hook: power up the core, notify encoders that a
 * commit is coming, then run any secure-transition preparation. The
 * matching power vote release happens in sde_kms_complete_commit().
 */
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc != NULL)
			sde_encoder_prepare_commit(encoder);

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing preparation for secure
	 * transitions below, if any transition is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);
}
525
526static void sde_kms_commit(struct msm_kms *kms,
527 struct drm_atomic_state *old_state)
528{
529 struct drm_crtc *crtc;
530 struct drm_crtc_state *old_crtc_state;
531 int i;
532
533 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
534 if (crtc->state->active) {
535 SDE_EVT32(DRMID(crtc));
536 sde_crtc_commit_kickoff(crtc);
537 }
538 }
539}
540
/*
 * msm_kms complete_commit hook: let each crtc finish its commit
 * bookkeeping, then drop the power vote taken in
 * sde_kms_prepare_commit().
 */
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
		sde_crtc_complete_commit(crtc, old_crtc_state);

	/* balances the enable in sde_kms_prepare_commit() */
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
}
565
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -0400566static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
Abhijit Kulkarni40e38162016-06-26 22:12:09 -0400567 struct drm_crtc *crtc)
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -0400568{
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -0400569 struct drm_encoder *encoder;
Narendra Muppallaec11a0a2017-06-15 15:35:17 -0700570 struct drm_device *dev;
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -0400571 int ret;
572
Alan Kwongf34ef982016-09-29 20:53:53 -0400573 if (!kms || !crtc || !crtc->state) {
574 SDE_ERROR("invalid params\n");
575 return;
576 }
577
Narendra Muppallaec11a0a2017-06-15 15:35:17 -0700578 dev = crtc->dev;
579
Alan Kwongf34ef982016-09-29 20:53:53 -0400580 if (!crtc->state->enable) {
581 SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
582 return;
583 }
584
585 if (!crtc->state->active) {
586 SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
587 return;
588 }
589
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -0400590 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
591 if (encoder->crtc != crtc)
592 continue;
593 /*
Dhaval Patel6c666622017-03-21 23:02:59 -0700594 * Wait for post-flush if necessary to delay before
595 * plane_cleanup. For example, wait for vsync in case of video
596 * mode panels. This may be a no-op for command mode panels.
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -0400597 */
Dhaval Patel6c666622017-03-21 23:02:59 -0700598 SDE_EVT32_VERBOSE(DRMID(crtc));
Jeykumar Sankarandfaeec92017-06-06 15:21:51 -0700599 ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -0400600 if (ret && ret != -EWOULDBLOCK) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -0400601 SDE_ERROR("wait for commit done returned %d\n", ret);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -0400602 break;
603 }
604 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -0400605}
Lloyd Atkinson5d722782016-05-30 14:09:41 -0400606
Clarence Ip24f80662016-06-13 19:05:32 -0400607static void sde_kms_prepare_fence(struct msm_kms *kms,
Clarence Ip0d0e96d2016-10-24 18:13:13 -0400608 struct drm_atomic_state *old_state)
Clarence Ip24f80662016-06-13 19:05:32 -0400609{
610 struct drm_crtc *crtc;
Clarence Ip0d0e96d2016-10-24 18:13:13 -0400611 struct drm_crtc_state *old_crtc_state;
612 int i, rc;
Clarence Ip24f80662016-06-13 19:05:32 -0400613
Clarence Ip0d0e96d2016-10-24 18:13:13 -0400614 if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
615 SDE_ERROR("invalid argument(s)\n");
616 return;
617 }
618
619retry:
620 /* attempt to acquire ww mutex for connection */
621 rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
622 old_state->acquire_ctx);
623
624 if (rc == -EDEADLK) {
625 drm_modeset_backoff(old_state->acquire_ctx);
626 goto retry;
627 }
628
629 /* old_state actually contains updated crtc pointers */
Veera Sundaram Sankaran675ff622017-06-21 21:44:46 -0700630 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
631 if (crtc->state->active)
632 sde_crtc_prepare_commit(crtc, old_crtc_state);
633 }
Clarence Ip24f80662016-06-13 19:05:32 -0400634}
635
Clarence Ip3649f8b2016-10-31 09:59:44 -0400636/**
637 * _sde_kms_get_displays - query for underlying display handles and cache them
638 * @sde_kms: Pointer to sde kms structure
639 * Returns: Zero on success
640 */
641static int _sde_kms_get_displays(struct sde_kms *sde_kms)
642{
643 int rc = -ENOMEM;
644
645 if (!sde_kms) {
646 SDE_ERROR("invalid sde kms\n");
647 return -EINVAL;
648 }
649
650 /* dsi */
651 sde_kms->dsi_displays = NULL;
652 sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
653 if (sde_kms->dsi_display_count) {
654 sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
655 sizeof(void *),
656 GFP_KERNEL);
657 if (!sde_kms->dsi_displays) {
658 SDE_ERROR("failed to allocate dsi displays\n");
659 goto exit_deinit_dsi;
660 }
661 sde_kms->dsi_display_count =
662 dsi_display_get_active_displays(sde_kms->dsi_displays,
663 sde_kms->dsi_display_count);
664 }
665
666 /* wb */
667 sde_kms->wb_displays = NULL;
668 sde_kms->wb_display_count = sde_wb_get_num_of_displays();
669 if (sde_kms->wb_display_count) {
670 sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
671 sizeof(void *),
672 GFP_KERNEL);
673 if (!sde_kms->wb_displays) {
674 SDE_ERROR("failed to allocate wb displays\n");
675 goto exit_deinit_wb;
676 }
677 sde_kms->wb_display_count =
678 wb_display_get_displays(sde_kms->wb_displays,
679 sde_kms->wb_display_count);
680 }
Padmanabhan Komanduru63758612017-05-23 01:47:18 -0700681
682 /* dp */
683 sde_kms->dp_displays = NULL;
684 sde_kms->dp_display_count = dp_display_get_num_of_displays();
685 if (sde_kms->dp_display_count) {
686 sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
687 sizeof(void *), GFP_KERNEL);
688 if (!sde_kms->dp_displays) {
689 SDE_ERROR("failed to allocate dp displays\n");
690 goto exit_deinit_dp;
691 }
692 sde_kms->dp_display_count =
693 dp_display_get_displays(sde_kms->dp_displays,
694 sde_kms->dp_display_count);
695 }
Clarence Ip3649f8b2016-10-31 09:59:44 -0400696 return 0;
697
Padmanabhan Komanduru63758612017-05-23 01:47:18 -0700698exit_deinit_dp:
699 kfree(sde_kms->dp_displays);
700 sde_kms->dp_display_count = 0;
701 sde_kms->dp_displays = NULL;
702
Clarence Ip3649f8b2016-10-31 09:59:44 -0400703exit_deinit_wb:
704 kfree(sde_kms->wb_displays);
705 sde_kms->wb_display_count = 0;
706 sde_kms->wb_displays = NULL;
707
708exit_deinit_dsi:
709 kfree(sde_kms->dsi_displays);
710 sde_kms->dsi_display_count = 0;
711 sde_kms->dsi_displays = NULL;
712 return rc;
713}
714
715/**
716 * _sde_kms_release_displays - release cache of underlying display handles
717 * @sde_kms: Pointer to sde kms structure
718 */
719static void _sde_kms_release_displays(struct sde_kms *sde_kms)
720{
721 if (!sde_kms) {
722 SDE_ERROR("invalid sde kms\n");
723 return;
724 }
725
726 kfree(sde_kms->wb_displays);
727 sde_kms->wb_displays = NULL;
728 sde_kms->wb_display_count = 0;
729
730 kfree(sde_kms->dsi_displays);
731 sde_kms->dsi_displays = NULL;
732 sde_kms->dsi_display_count = 0;
733}
734
/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *	for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 *
 * A failure on any individual display is logged and skipped (with its
 * partially built objects torn down) rather than aborting the whole setup.
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	/* connector callback table for DSI panels */
	static const struct sde_connector_ops dsi_ops = {
		.post_init = dsi_conn_post_init,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.put_modes = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
	};
	/* connector callback table for writeback outputs */
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL
	};
	/* connector callback table for DisplayPort */
	static const struct sde_connector_ops dp_ops = {
		.post_init = dp_connector_post_init,
		.detect = dp_connector_detect,
		.get_modes = dp_connector_get_modes,
		.mode_valid = dp_connector_mode_valid,
		.get_info = dp_connector_get_info,
		.get_mode_info = dp_connector_get_mode_info,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	/* cap total encoders to the fixed-size array in msm_drm_private */
	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
		sde_kms->dp_display_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d", max_encoders);
	}

	/* dsi: encoder -> bridge -> connector; unwind on partial failure */
	for (i = 0; i < sde_kms->dsi_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(&info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
					encoder,
					0,
					display,
					&dsi_ops,
					DRM_CONNECTOR_POLL_HPD,
					DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	/* wb: same encoder -> bridge -> connector sequence */
	for (i = 0; i < sde_kms->wb_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(&info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
					encoder,
					0,
					display,
					&wb_ops,
					DRM_CONNECTOR_POLL_HPD,
					DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}
	/* dp: same encoder -> bridge -> connector sequence */
	for (i = 0; i < sde_kms->dp_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(&info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
					encoder,
					NULL,
					display,
					&dp_ops,
					DRM_CONNECTOR_POLL_HPD,
					DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	return 0;
}
925
/**
 * _sde_kms_drm_obj_destroy - destroy all drm mode objects created by
 *	_sde_kms_drm_obj_init and release the underlying displays
 * @sde_kms: pointer to the sde kms structure
 *
 * Each object array count in msm_drm_private is reset to zero after its
 * entries are destroyed so this function is safe to call on a partially
 * initialized state (e.g. from the obj_init failure path).
 */
static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}
	priv = sde_kms->dev->dev_private;

	/*
	 * Destruction order: crtcs, planes, connectors, encoders, then the
	 * display backends. NOTE(review): crtcs are destroyed before the
	 * planes they reference - presumably the crtc destroy hooks do not
	 * touch plane state; confirm before reordering.
	 */
	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}
961
/**
 * _sde_kms_drm_obj_init - create the drm mode objects (planes, crtcs,
 *	encoders, connectors) for the sde kms, based on the hardware catalog
 * @sde_kms: pointer to the sde kms structure
 *
 * Return: 0 on success, negative error code on failure. On failure all
 * objects created so far are destroyed via _sde_kms_drm_obj_destroy.
 */
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;

	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;

	/* per-priority SSPP ids and master plane ids for smart DMA */
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	/* cannot drive more crtcs than there are mixers or encoders */
	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		/* cursor pipes and surplus pipes become overlay planes */
		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		/*
		 * Record multirect-capable pipes by smart DMA priority so
		 * virtual planes can be created below in priority order.
		 */
		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
			sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
			(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	/*
	 * NOTE(review): this fini also runs when sde_core_irq_domain_add
	 * itself failed - presumably fini is safe on a non-added domain;
	 * confirm against sde_core_irq implementation.
	 */
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
1071
/**
 * struct sde_kms_fbo_fb - framebuffer creation list entry
 * @list: list node linked into the owning sde_kms_fbo's fb_list
 * @fb: Pointer to the framebuffer attached to the framebuffer object
 */
struct sde_kms_fbo_fb {
	struct list_head list;
	struct drm_framebuffer *fb;
};
1081
/**
 * sde_kms_fbo_create_fb - create a drm framebuffer backed by the given
 *	framebuffer object and track it on the fbo's framebuffer list
 * @dev: pointer to the drm device
 * @fbo: pointer to the framebuffer object supplying geometry, format and
 *	backing gem buffers
 *
 * Return: pointer to the new framebuffer on success, NULL on failure.
 * The returned framebuffer holds references on the fbo's gem objects and
 * an extra framebuffer reference held by the fbo's fb_list entry (dropped
 * in sde_kms_fbo_destroy).
 */
struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
		struct sde_kms_fbo *fbo)
{
	struct drm_framebuffer *fb = NULL;
	struct sde_kms_fbo_fb *fbo_fb;
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	u32 base_offset = 0;
	int i, ret;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return NULL;
	}

	fbo_fb = kzalloc(sizeof(struct sde_kms_fbo_fb), GFP_KERNEL);
	if (!fbo_fb)
		return NULL;

	/* translate the fbo layout into a drm mode_fb_cmd2 description */
	mode_cmd.pixel_format = fbo->pixel_format;
	mode_cmd.width = fbo->width;
	mode_cmd.height = fbo->height;
	mode_cmd.flags = fbo->flags;

	/* planes are packed back-to-back in the single backing buffer */
	for (i = 0; i < fbo->nplane; i++) {
		mode_cmd.offsets[i] = base_offset;
		mode_cmd.pitches[i] = fbo->layout.plane_pitch[i];
		mode_cmd.modifier[i] = fbo->modifier[i];
		base_offset += fbo->layout.plane_size[i];
		SDE_DEBUG("offset[%d]:%x\n", i, mode_cmd.offsets[i]);
	}

	fb = msm_framebuffer_init(dev, &mode_cmd, fbo->bo);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		fb = NULL;
		SDE_ERROR("failed to allocate fb %d\n", ret);
		goto fail;
	}

	/* need to take one reference for gem object */
	for (i = 0; i < fbo->nplane; i++)
		drm_gem_object_reference(fbo->bo[i]);

	SDE_DEBUG("register private fb:%d\n", fb->base.id);

	/* track the fb on the fbo list; extra ref is dropped on destroy */
	INIT_LIST_HEAD(&fbo_fb->list);
	fbo_fb->fb = fb;
	drm_framebuffer_reference(fbo_fb->fb);
	list_add_tail(&fbo_fb->list, &fbo->fb_list);

	return fb;

fail:
	kfree(fbo_fb);
	return NULL;
}
1138
/**
 * sde_kms_fbo_destroy - release all resources held by a framebuffer object
 * @fbo: pointer to the framebuffer object
 *
 * Drops the tracked framebuffers, the gem object references, the dma-buf
 * reference and the ion handle, in that order. Called when the fbo
 * refcount reaches zero (see sde_kms_fbo_unreference) and from the
 * sde_kms_fbo_alloc failure path.
 */
static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct sde_kms_fbo_fb *curr, *next;
	int i;

	if (!fbo) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	dev = fbo->dev;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	priv = dev->dev_private;

	if (!priv->kms) {
		SDE_ERROR("invalid kms handle\n");
		return;
	}
	sde_kms = to_sde_kms(priv->kms);

	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", fbo->width, fbo->height,
			fbo->pixel_format >> 0, fbo->pixel_format >> 8,
			fbo->pixel_format >> 16, fbo->pixel_format >> 24,
			fbo->modifier[0], fbo->flags);

	/* drop the per-fb references taken in sde_kms_fbo_create_fb */
	list_for_each_entry_safe(curr, next, &fbo->fb_list, list) {
		SDE_DEBUG("unregister private fb:%d\n", curr->fb->base.id);
		drm_framebuffer_unregister_private(curr->fb);
		drm_framebuffer_unreference(curr->fb);
		list_del(&curr->list);
		kfree(curr);
	}

	/* release the backing gem objects; struct_mutex guards gem refs */
	for (i = 0; i < fbo->layout.num_planes; i++) {
		if (fbo->bo[i]) {
			mutex_lock(&dev->struct_mutex);
			drm_gem_object_unreference(fbo->bo[i]);
			mutex_unlock(&dev->struct_mutex);
			fbo->bo[i] = NULL;
		}
	}

	if (fbo->dma_buf) {
		dma_buf_put(fbo->dma_buf);
		fbo->dma_buf = NULL;
	}

	if (sde_kms->iclient && fbo->ihandle) {
		ion_free(sde_kms->iclient, fbo->ihandle);
		fbo->ihandle = NULL;
	}
}
1197
Clarence Ipd02440b2017-05-21 18:10:01 -04001198static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
1199 uint32_t flags)
1200{
1201 if (msm_obj)
1202 msm_obj->flags |= flags;
1203}
1204
Alan Kwong4dd64c82017-02-04 18:41:51 -08001205struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
1206 u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
1207{
1208 struct msm_drm_private *priv;
1209 struct sde_kms *sde_kms;
1210 struct sde_kms_fbo *fbo;
1211 int i, ret;
1212
1213 if (!dev || !dev->dev_private) {
1214 SDE_ERROR("invalid drm device node\n");
1215 return NULL;
1216 }
1217 priv = dev->dev_private;
1218
1219 if (!priv->kms) {
1220 SDE_ERROR("invalid kms handle\n");
1221 return NULL;
1222 }
1223 sde_kms = to_sde_kms(priv->kms);
1224
1225 SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", width, height,
1226 pixel_format >> 0, pixel_format >> 8,
1227 pixel_format >> 16, pixel_format >> 24,
1228 modifier[0], flags);
1229
1230 fbo = kzalloc(sizeof(struct sde_kms_fbo), GFP_KERNEL);
1231 if (!fbo)
1232 return NULL;
1233
1234 atomic_set(&fbo->refcount, 0);
1235 INIT_LIST_HEAD(&fbo->fb_list);
1236 fbo->dev = dev;
1237 fbo->width = width;
1238 fbo->height = height;
1239 fbo->pixel_format = pixel_format;
1240 fbo->flags = flags;
1241 for (i = 0; i < ARRAY_SIZE(fbo->modifier); i++)
1242 fbo->modifier[i] = modifier[i];
1243 fbo->nplane = drm_format_num_planes(fbo->pixel_format);
1244 fbo->fmt = sde_get_sde_format_ext(fbo->pixel_format, fbo->modifier,
1245 fbo->nplane);
1246 if (!fbo->fmt) {
1247 ret = -EINVAL;
1248 SDE_ERROR("failed to find pixel format\n");
1249 goto done;
1250 }
1251
1252 ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
Narendra Muppalla58a64e22017-07-24 10:54:47 -07001253 &fbo->layout, fbo->layout.plane_pitch);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001254 if (ret) {
1255 SDE_ERROR("failed to get plane sizes\n");
1256 goto done;
1257 }
1258
1259 /* allocate backing buffer object */
Alan Kwong54125bb2017-02-26 16:01:36 -08001260 if (sde_kms->iclient) {
1261 u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
1262 ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID) :
1263 ION_HEAP(ION_SYSTEM_HEAP_ID);
1264
1265 fbo->ihandle = ion_alloc(sde_kms->iclient,
1266 fbo->layout.total_size, SZ_4K, heap_id, 0);
1267 if (IS_ERR_OR_NULL(fbo->ihandle)) {
1268 SDE_ERROR("failed to alloc ion memory\n");
1269 ret = PTR_ERR(fbo->ihandle);
1270 fbo->ihandle = NULL;
1271 goto done;
1272 }
1273
1274 fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
1275 fbo->ihandle);
1276 if (IS_ERR(fbo->dma_buf)) {
1277 SDE_ERROR("failed to share ion memory\n");
1278 ret = -ENOMEM;
1279 fbo->dma_buf = NULL;
1280 goto done;
1281 }
1282
1283 fbo->bo[0] = dev->driver->gem_prime_import(dev,
1284 fbo->dma_buf);
1285 if (IS_ERR(fbo->bo[0])) {
1286 SDE_ERROR("failed to import ion memory\n");
1287 ret = PTR_ERR(fbo->bo[0]);
1288 fbo->bo[0] = NULL;
1289 goto done;
1290 }
Clarence Ipd02440b2017-05-21 18:10:01 -04001291
1292 /* insert extra bo flags */
1293 sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001294 } else {
1295 mutex_lock(&dev->struct_mutex);
1296 fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
Clarence Ipd02440b2017-05-21 18:10:01 -04001297 MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
Alan Kwong54125bb2017-02-26 16:01:36 -08001298 if (IS_ERR(fbo->bo[0])) {
1299 mutex_unlock(&dev->struct_mutex);
1300 SDE_ERROR("failed to new gem buffer\n");
1301 ret = PTR_ERR(fbo->bo[0]);
1302 fbo->bo[0] = NULL;
1303 goto done;
1304 }
Alan Kwong4dd64c82017-02-04 18:41:51 -08001305 mutex_unlock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001306 }
1307
Alan Kwong54125bb2017-02-26 16:01:36 -08001308 mutex_lock(&dev->struct_mutex);
Alan Kwong4dd64c82017-02-04 18:41:51 -08001309 for (i = 1; i < fbo->layout.num_planes; i++) {
1310 fbo->bo[i] = fbo->bo[0];
1311 drm_gem_object_reference(fbo->bo[i]);
1312 }
1313 mutex_unlock(&dev->struct_mutex);
1314
1315done:
1316 if (ret) {
1317 sde_kms_fbo_destroy(fbo);
1318 kfree(fbo);
1319 fbo = NULL;
1320 } else {
1321 sde_kms_fbo_reference(fbo);
1322 }
1323
1324 return fbo;
1325}
1326
1327int sde_kms_fbo_reference(struct sde_kms_fbo *fbo)
1328{
1329 if (!fbo) {
1330 SDE_ERROR("invalid parameters\n");
1331 return -EINVAL;
1332 }
1333
1334 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1335 atomic_read(&fbo->refcount));
1336
1337 atomic_inc(&fbo->refcount);
1338
1339 return 0;
1340}
1341
1342void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo)
1343{
1344 if (!fbo) {
1345 SDE_ERROR("invalid parameters\n");
1346 return;
1347 }
1348
1349 SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
1350 atomic_read(&fbo->refcount));
1351
1352 if (!atomic_read(&fbo->refcount)) {
1353 SDE_ERROR("invalid refcount\n");
1354 return;
1355 } else if (atomic_dec_return(&fbo->refcount) == 0) {
1356 sde_kms_fbo_destroy(fbo);
1357 }
1358}
1359
Alan Kwong5a3ac752016-10-16 01:02:35 -04001360static int sde_kms_postinit(struct msm_kms *kms)
1361{
1362 struct sde_kms *sde_kms = to_sde_kms(kms);
1363 struct drm_device *dev;
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07001364 int rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04001365
1366 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
1367 SDE_ERROR("invalid sde_kms\n");
1368 return -EINVAL;
1369 }
1370
1371 dev = sde_kms->dev;
1372
Lloyd Atkinsonb020e0f2017-03-14 08:05:18 -07001373 rc = _sde_debugfs_init(sde_kms);
1374 if (rc)
1375 SDE_ERROR("sde_debugfs init failed: %d\n", rc);
1376
1377 return rc;
Alan Kwong5a3ac752016-10-16 01:02:35 -04001378}
1379
/* pixel clock rounding hook: SDE accepts any rate, so pass it through */
static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
1385
/**
 * _sde_kms_hw_destroy - release all hardware resources owned by the sde kms
 * @sde_kms: pointer to the sde kms structure
 * @pdev: platform device used to unmap the register regions
 *
 * Tears down interrupts, power event registration, displays, debugfs,
 * mmu/ion, vbif, resource manager, catalog, power client and register
 * mappings. Each step is guarded so this is safe on partial init.
 */
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);

	/* safe to call these more than once during shutdown */
	_sde_debugfs_destroy(sde_kms);
	_sde_kms_mmu_destroy(sde_kms);

	if (sde_kms->iclient) {
		ion_client_destroy(sde_kms->iclient);
		sde_kms->iclient = NULL;
	}

	/* destroy per-vbif hardware blocks listed in the catalog */
	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->core_client)
		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
	sde_kms->core_client = NULL;

	/* unmap register regions last; earlier steps may still touch them */
	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
}
1458
/**
 * sde_kms_mmu_detach - detach smmu domains from their address spaces
 * @sde_kms: pointer to the sde kms structure
 * @secure_only: when true, only domains reported secure by the mmu are
 *	detached; otherwise all attached domains are detached
 *
 * The address space is cleaned up (mappings updated) before the actual
 * detach, and domain_attached is cleared afterwards so lookups via
 * _sde_kms_get_address_space return NULL for detached domains.
 *
 * Return: 0 on success, -EINVAL if @sde_kms is NULL
 */
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		/* cleanup aspace before detaching */
		msm_gem_aspace_domain_attach_detach_update(aspace, true);

		SDE_DEBUG("Detaching domain:%d\n", i);
		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = false;
	}

	return 0;
}
1491
/**
 * sde_kms_mmu_attach - attach smmu domains to their address spaces
 * @sde_kms: pointer to the sde kms structure
 * @secure_only: when true, only domains reported secure by the mmu are
 *	attached; otherwise all known domains are attached
 *
 * Mirror of sde_kms_mmu_detach: the domain is attached first, then the
 * address space mappings are restored and domain_attached is set so
 * _sde_kms_get_address_space starts returning the aspace again.
 *
 * Return: 0 on success, -EINVAL if @sde_kms is NULL
 */
int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		SDE_DEBUG("Attaching domain:%d\n", i);
		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		msm_gem_aspace_domain_attach_detach_update(aspace, false);
		aspace->domain_attached = true;
	}

	return 0;
}
1522
Clarence Ip17162b52016-11-24 17:06:29 -05001523static void sde_kms_destroy(struct msm_kms *kms)
1524{
1525 struct sde_kms *sde_kms;
1526 struct drm_device *dev;
1527
1528 if (!kms) {
1529 SDE_ERROR("invalid kms\n");
1530 return;
1531 }
1532
1533 sde_kms = to_sde_kms(kms);
1534 dev = sde_kms->dev;
1535 if (!dev) {
1536 SDE_ERROR("invalid device\n");
1537 return;
1538 }
1539
1540 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001541 kfree(sde_kms);
1542}
1543
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04001544static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
1545{
1546 struct sde_kms *sde_kms = to_sde_kms(kms);
1547 struct drm_device *dev = sde_kms->dev;
1548 struct msm_drm_private *priv = dev->dev_private;
1549 unsigned int i;
1550
1551 for (i = 0; i < priv->num_crtcs; i++)
1552 sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
1553}
1554
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001555static int sde_kms_check_secure_transition(struct msm_kms *kms,
1556 struct drm_atomic_state *state)
1557{
1558 struct sde_kms *sde_kms;
1559 struct drm_device *dev;
1560 struct drm_crtc *crtc;
1561 struct drm_crtc *sec_crtc = NULL, *temp_crtc = NULL;
1562 struct drm_crtc_state *crtc_state;
1563 int secure_crtc_cnt = 0, active_crtc_cnt = 0;
1564 int secure_global_crtc_cnt = 0, active_mode_crtc_cnt = 0;
1565 int i;
1566
1567 if (!kms || !state) {
1568 return -EINVAL;
1569 SDE_ERROR("invalid arguments\n");
1570 }
1571
1572 /* iterate state object for active and secure crtc */
1573 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1574 if (!crtc_state->active)
1575 continue;
1576 active_crtc_cnt++;
1577 if (sde_crtc_get_secure_level(crtc, crtc_state) ==
1578 SDE_DRM_SEC_ONLY) {
1579 sec_crtc = crtc;
1580 secure_crtc_cnt++;
1581 }
1582 }
1583
1584 /* bail out from further validation if no secure ctrc */
1585 if (!secure_crtc_cnt)
1586 return 0;
1587
1588 if ((secure_crtc_cnt > MAX_ALLOWED_SECURE_CLIENT_CNT) ||
1589 (secure_crtc_cnt &&
1590 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE))) {
1591 SDE_ERROR("Secure check failed active:%d, secure:%d\n",
1592 active_crtc_cnt, secure_crtc_cnt);
1593 return -EPERM;
1594 }
1595
1596 sde_kms = to_sde_kms(kms);
1597 dev = sde_kms->dev;
1598 /* iterate global list for active and secure crtc */
1599 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1600
1601 if (!crtc->state->active)
1602 continue;
1603
1604 active_mode_crtc_cnt++;
1605
1606 if (sde_crtc_get_secure_level(crtc, crtc->state) ==
1607 SDE_DRM_SEC_ONLY) {
1608 secure_global_crtc_cnt++;
1609 temp_crtc = crtc;
1610 }
1611 }
1612
1613 /**
1614 * if more than one crtc is active fail
1615 * check if the previous and current commit secure
1616 * are same
1617 */
1618 if (secure_crtc_cnt && ((active_mode_crtc_cnt > 1) ||
1619 (secure_global_crtc_cnt && (temp_crtc != sec_crtc))))
1620 SDE_ERROR("Secure check failed active:%d crtc_id:%d\n",
1621 active_mode_crtc_cnt, temp_crtc->base.id);
1622
1623 return 0;
1624}
1625
1626static int sde_kms_atomic_check(struct msm_kms *kms,
1627 struct drm_atomic_state *state)
1628{
1629 struct sde_kms *sde_kms;
1630 struct drm_device *dev;
1631 int ret;
1632
1633 if (!kms || !state)
1634 return -EINVAL;
1635
1636 sde_kms = to_sde_kms(kms);
1637 dev = sde_kms->dev;
1638
Clarence Ipd86f6e42017-08-08 18:31:00 -04001639 if (sde_kms_is_suspend_blocked(dev)) {
1640 SDE_DEBUG("suspended, skip atomic_check\n");
1641 return -EBUSY;
1642 }
1643
Abhijit Kulkarni7444a7d2017-06-21 18:53:36 -07001644 ret = drm_atomic_helper_check(dev, state);
1645 if (ret)
1646 return ret;
1647 /*
1648 * Check if any secure transition(moving CRTC between secure and
1649 * non-secure state and vice-versa) is allowed or not. when moving
1650 * to secure state, planes with fb_mode set to dir_translated only can
1651 * be staged on the CRTC, and only one CRTC can be active during
1652 * Secure state
1653 */
1654 return sde_kms_check_secure_transition(kms, state);
1655}
1656
Jordan Croused8e96522017-02-13 10:14:16 -07001657static struct msm_gem_address_space*
1658_sde_kms_get_address_space(struct msm_kms *kms,
1659 unsigned int domain)
1660{
1661 struct sde_kms *sde_kms;
1662
1663 if (!kms) {
1664 SDE_ERROR("invalid kms\n");
1665 return NULL;
1666 }
1667
1668 sde_kms = to_sde_kms(kms);
1669 if (!sde_kms) {
1670 SDE_ERROR("invalid sde_kms\n");
1671 return NULL;
1672 }
1673
1674 if (domain >= MSM_SMMU_DOMAIN_MAX)
1675 return NULL;
1676
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07001677 return (sde_kms->aspace[domain] &&
1678 sde_kms->aspace[domain]->domain_attached) ?
1679 sde_kms->aspace[domain] : NULL;
Jordan Croused8e96522017-02-13 10:14:16 -07001680}
1681
Tatenda Chipeperekwac72985b2017-07-31 16:27:56 -07001682static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
1683{
1684 struct drm_device *dev = NULL;
1685 struct sde_kms *sde_kms = NULL;
1686
1687 if (!kms) {
1688 SDE_ERROR("invalid kms\n");
1689 return;
1690 }
1691
1692 sde_kms = to_sde_kms(kms);
1693 dev = sde_kms->dev;
1694
1695 if (!dev) {
1696 SDE_ERROR("invalid device\n");
1697 return;
1698 }
1699
1700 if (dev->mode_config.funcs->output_poll_changed)
1701 dev->mode_config.funcs->output_poll_changed(dev);
1702}
1703
/**
 * sde_kms_pm_suspend - device pm suspend handler
 * @dev: pointer to the struct device
 *
 * Backs up the current atomic state into sde_kms->suspend_state, then
 * commits a state that transitions LP1 connectors to LP2 and forces all
 * other active CRTCs off. Sets suspend_block so atomic_check rejects new
 * commits until resume. Uses the drm modeset acquire context with EDEADLK
 * retry.
 *
 * Return: 0 (errors are logged; suspend proceeds regardless)
 */
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_atomic_state *state;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_free(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		DRM_ERROR("failed to back up suspend state\n");
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (IS_ERR_OR_NULL(state)) {
		DRM_ERROR("failed to allocate crtc disable state\n");
		goto unlock;
	}

	state->acquire_ctx = &ctx;
	drm_for_each_connector(conn, ddev) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		/* only connectors that are currently driving a crtc */
		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON)
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_atomic_state_free(state);
				goto unlock;
			}
		}

		/* LP2 crtcs are already effectively off; skip them */
		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_atomic_state_free(state);
				goto unlock;
			}

			/* LP1 crtcs stay active (panel in low power) */
			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		drm_atomic_state_free(state);
		goto suspended;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		drm_atomic_state_free(state);
		goto unlock;
	}

suspended:
	/* blocks new atomic commits until sde_kms_pm_resume clears it */
	sde_kms->suspend_block = true;

unlock:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return 0;
}
1818
/**
 * sde_kms_pm_resume - device pm resume handler
 * @dev: pointer to the struct device
 *
 * Clears the suspend commit block, replays the atomic state saved by
 * sde_kms_pm_suspend (if any), and re-enables hot-plug polling.
 *
 * Return: 0 (a failed state restore is logged but not propagated)
 */
static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	int ret;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	drm_mode_config_reset(ddev);

	drm_modeset_lock_all(ddev);

	/* allow atomic commits again before replaying the saved state */
	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx =
			ddev->mode_config.acquire_ctx;
		ret = drm_atomic_commit(sde_kms->suspend_state);
		if (ret < 0) {
			DRM_ERROR("failed to restore state, %d\n", ret);
			drm_atomic_state_free(sde_kms->suspend_state);
		}
		sde_kms->suspend_state = NULL;
	}
	drm_modeset_unlock_all(ddev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
1859
/* sde implementation of the msm_kms_funcs hook table, registered with the
 * msm drm core; entries map 1:1 to struct msm_kms_funcs callbacks
 */
static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.enable_vblank = sde_kms_enable_vblank,
	.disable_vblank = sde_kms_disable_vblank,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.postopen = _sde_kms_post_open,
};
1887
/* the caller api needs to turn on clock before calling it */
static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
{
	/* hardware revision register lives at offset 0x0 of the mapped mmio */
	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
}
1893
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001894static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
1895{
1896 struct msm_mmu *mmu;
1897 int i;
1898
Jordan Croused8e96522017-02-13 10:14:16 -07001899 for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
1900 if (!sde_kms->aspace[i])
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001901 continue;
1902
Jordan Croused8e96522017-02-13 10:14:16 -07001903 mmu = sde_kms->aspace[i]->mmu;
1904
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001905 mmu->funcs->detach(mmu, (const char **)iommu_ports,
1906 ARRAY_SIZE(iommu_ports));
Jordan Crouse12bf3622017-02-13 10:14:11 -07001907 msm_gem_address_space_destroy(sde_kms->aspace[i]);
1908
Jordan Croused8e96522017-02-13 10:14:16 -07001909 sde_kms->aspace[i] = NULL;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001910 }
1911
1912 return 0;
1913}
1914
1915static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001916{
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001917 struct msm_mmu *mmu;
1918 int i, ret;
1919
Alan Kwong112a84f2016-05-24 20:49:21 -04001920 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
Jordan Crouse12bf3622017-02-13 10:14:11 -07001921 struct msm_gem_address_space *aspace;
1922
Alan Kwong112a84f2016-05-24 20:49:21 -04001923 mmu = msm_smmu_new(sde_kms->dev->dev, i);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001924 if (IS_ERR(mmu)) {
1925 ret = PTR_ERR(mmu);
Dhaval Patel5473cd22017-03-19 21:38:08 -07001926 SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
1927 i, ret);
Dhaval Patel5200c602017-01-17 15:53:37 -08001928 continue;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001929 }
1930
Abhijit Kulkarnif4657b12017-06-28 18:40:19 -07001931 aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
Jordan Crouse12bf3622017-02-13 10:14:11 -07001932 mmu, "sde");
1933 if (IS_ERR(aspace)) {
1934 ret = PTR_ERR(aspace);
1935 mmu->funcs->destroy(mmu);
1936 goto fail;
1937 }
1938
1939 sde_kms->aspace[i] = aspace;
1940
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001941 ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
1942 ARRAY_SIZE(iommu_ports));
1943 if (ret) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04001944 SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
Jordan Crouse12bf3622017-02-13 10:14:11 -07001945 msm_gem_address_space_destroy(aspace);
1946 goto fail;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001947 }
Abhijit Kulkarni329a94d2017-06-20 17:07:08 -07001948 aspace->domain_attached = true;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001949 }
1950
1951 return 0;
1952fail:
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001953 _sde_kms_mmu_destroy(sde_kms);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001954
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04001955 return ret;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001956}
1957
Clarence Ip7f0de632017-05-31 14:59:14 -04001958static void sde_kms_handle_power_event(u32 event_type, void *usr)
1959{
1960 struct sde_kms *sde_kms = usr;
1961
1962 if (!sde_kms)
1963 return;
1964
1965 if (event_type == SDE_POWER_EVENT_POST_ENABLE)
1966 sde_vbif_init_memtypes(sde_kms);
1967}
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04001968
Clarence Ip17162b52016-11-24 17:06:29 -05001969static int sde_kms_hw_init(struct msm_kms *kms)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001970{
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001971 struct sde_kms *sde_kms;
Clarence Ip17162b52016-11-24 17:06:29 -05001972 struct drm_device *dev;
Dhaval Patel3949f032016-06-20 16:24:33 -07001973 struct msm_drm_private *priv;
Clarence Ip17162b52016-11-24 17:06:29 -05001974 int i, rc = -EINVAL;
Dhaval Patel3949f032016-06-20 16:24:33 -07001975
Clarence Ip17162b52016-11-24 17:06:29 -05001976 if (!kms) {
1977 SDE_ERROR("invalid kms\n");
1978 goto end;
1979 }
1980
1981 sde_kms = to_sde_kms(kms);
1982 dev = sde_kms->dev;
1983 if (!dev || !dev->platformdev) {
1984 SDE_ERROR("invalid device\n");
Dhaval Patel3949f032016-06-20 16:24:33 -07001985 goto end;
1986 }
1987
1988 priv = dev->dev_private;
Clarence Ip17162b52016-11-24 17:06:29 -05001989 if (!priv) {
1990 SDE_ERROR("invalid private data\n");
Dhaval Patel3949f032016-06-20 16:24:33 -07001991 goto end;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001992 }
1993
Dhaval Patela2430842017-06-15 14:32:36 -07001994 sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "mdp_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05001995 if (IS_ERR(sde_kms->mmio)) {
1996 rc = PTR_ERR(sde_kms->mmio);
1997 SDE_ERROR("mdp register memory map failed: %d\n", rc);
1998 sde_kms->mmio = NULL;
1999 goto error;
2000 }
2001 DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
Dhaval Patela2430842017-06-15 14:32:36 -07002002 sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002003
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002004 rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
2005 sde_kms->mmio_len);
2006 if (rc)
2007 SDE_ERROR("dbg base register kms failed: %d\n", rc);
2008
Dhaval Patela2430842017-06-15 14:32:36 -07002009 sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, "vbif_phys",
2010 "vbif_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002011 if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
2012 rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
2013 SDE_ERROR("vbif register memory map failed: %d\n", rc);
2014 sde_kms->vbif[VBIF_RT] = NULL;
2015 goto error;
2016 }
Dhaval Patela2430842017-06-15 14:32:36 -07002017 sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(dev->platformdev,
2018 "vbif_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002019 rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
2020 sde_kms->vbif_len[VBIF_RT]);
2021 if (rc)
2022 SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
2023
Dhaval Patela2430842017-06-15 14:32:36 -07002024 sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, "vbif_nrt_phys",
2025 "vbif_nrt_phys");
Clarence Ip17162b52016-11-24 17:06:29 -05002026 if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
2027 sde_kms->vbif[VBIF_NRT] = NULL;
2028 SDE_DEBUG("VBIF NRT is not defined");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002029 } else {
Dhaval Patela2430842017-06-15 14:32:36 -07002030 sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(dev->platformdev,
2031 "vbif_nrt_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002032 rc = sde_dbg_reg_register_base("vbif_nrt",
2033 sde_kms->vbif[VBIF_NRT],
2034 sde_kms->vbif_len[VBIF_NRT]);
2035 if (rc)
2036 SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
2037 rc);
Clarence Ip17162b52016-11-24 17:06:29 -05002038 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002039
Dhaval Patela2430842017-06-15 14:32:36 -07002040 sde_kms->reg_dma = msm_ioremap(dev->platformdev, "regdma_phys",
2041 "regdma_phys");
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08002042 if (IS_ERR(sde_kms->reg_dma)) {
2043 sde_kms->reg_dma = NULL;
2044 SDE_DEBUG("REG_DMA is not defined");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002045 } else {
Dhaval Patela2430842017-06-15 14:32:36 -07002046 sde_kms->reg_dma_len = msm_iomap_size(dev->platformdev,
2047 "regdma_phys");
Lloyd Atkinson113aefd2016-10-23 13:15:18 -04002048 rc = sde_dbg_reg_register_base("vbif_nrt",
2049 sde_kms->reg_dma,
2050 sde_kms->reg_dma_len);
2051 if (rc)
2052 SDE_ERROR("dbg base register reg_dma failed: %d\n",
2053 rc);
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08002054 }
2055
Dhaval Patel3949f032016-06-20 16:24:33 -07002056 sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
2057 if (IS_ERR_OR_NULL(sde_kms->core_client)) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002058 rc = PTR_ERR(sde_kms->core_client);
Dhaval Patel5398f602017-03-25 18:25:18 -07002059 if (!sde_kms->core_client)
2060 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002061 SDE_ERROR("sde power client create failed: %d\n", rc);
2062 sde_kms->core_client = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002063 goto error;
Dhaval Patel3949f032016-06-20 16:24:33 -07002064 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002065
Dhaval Patel3949f032016-06-20 16:24:33 -07002066 rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
2067 true);
2068 if (rc) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002069 SDE_ERROR("resource enable failed: %d\n", rc);
Clarence Ip17162b52016-11-24 17:06:29 -05002070 goto error;
Dhaval Patel3949f032016-06-20 16:24:33 -07002071 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002072
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002073 _sde_kms_core_hw_rev_init(sde_kms);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04002074
Dhaval Patelb271b842016-10-19 21:41:22 -07002075 pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
2076
Dhaval Patel8bf7ff32016-07-20 18:13:24 -07002077 sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
Dhaval Patel3949f032016-06-20 16:24:33 -07002078 if (IS_ERR_OR_NULL(sde_kms->catalog)) {
Dhaval Patel3949f032016-06-20 16:24:33 -07002079 rc = PTR_ERR(sde_kms->catalog);
Dhaval Patel5398f602017-03-25 18:25:18 -07002080 if (!sde_kms->catalog)
2081 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002082 SDE_ERROR("catalog init failed: %d\n", rc);
2083 sde_kms->catalog = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002084 goto power_error;
Dhaval Patel3949f032016-06-20 16:24:33 -07002085 }
2086
Lloyd Atkinson274cc462017-02-21 11:52:06 -05002087 sde_dbg_init_dbg_buses(sde_kms->core_rev);
2088
Gopikrishnaiah Anandane69dc592017-03-29 14:00:55 -07002089 /*
2090 * Now we need to read the HW catalog and initialize resources such as
2091 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
2092 */
2093 rc = _sde_kms_mmu_init(sde_kms);
2094 if (rc) {
2095 SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
2096 goto power_error;
2097 }
2098
Gopikrishnaiah Anandan7e3e3f52016-12-22 11:13:05 -08002099 /* Initialize reg dma block which is a singleton */
2100 rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
2101 sde_kms->dev);
2102 if (rc) {
2103 SDE_ERROR("failed: reg dma init failed\n");
2104 goto power_error;
2105 }
2106
Dhaval Patel3949f032016-06-20 16:24:33 -07002107 rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
Lloyd Atkinson11f34442016-08-11 11:19:52 -04002108 sde_kms->dev);
Clarence Ip17162b52016-11-24 17:06:29 -05002109 if (rc) {
2110 SDE_ERROR("rm init failed: %d\n", rc);
2111 goto power_error;
2112 }
2113
2114 sde_kms->rm_init = true;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002115
2116 sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
2117 if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
2118 rc = PTR_ERR(sde_kms->hw_mdp);
Dhaval Patel5398f602017-03-25 18:25:18 -07002119 if (!sde_kms->hw_mdp)
2120 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002121 SDE_ERROR("failed to get hw_mdp: %d\n", rc);
2122 sde_kms->hw_mdp = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002123 goto power_error;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002124 }
Dhaval Patel3949f032016-06-20 16:24:33 -07002125
Alan Kwong5d324e42016-07-28 22:56:18 -04002126 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
2127 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
2128
2129 sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
2130 sde_kms->vbif[vbif_idx], sde_kms->catalog);
2131 if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002132 rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
Dhaval Patel5398f602017-03-25 18:25:18 -07002133 if (!sde_kms->hw_vbif[vbif_idx])
2134 rc = -EINVAL;
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002135 SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
Alan Kwong5d324e42016-07-28 22:56:18 -04002136 sde_kms->hw_vbif[vbif_idx] = NULL;
Clarence Ip17162b52016-11-24 17:06:29 -05002137 goto power_error;
Alan Kwong5d324e42016-07-28 22:56:18 -04002138 }
2139 }
2140
Alan Kwong54125bb2017-02-26 16:01:36 -08002141 sde_kms->iclient = msm_ion_client_create(dev->unique);
2142 if (IS_ERR(sde_kms->iclient)) {
2143 rc = PTR_ERR(sde_kms->iclient);
2144 SDE_DEBUG("msm_ion_client not available: %d\n", rc);
2145 sde_kms->iclient = NULL;
2146 }
2147
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002148
Alan Kwong67a3f792016-11-01 23:16:53 -04002149 rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
Dhaval Patel446446e2017-04-21 19:38:17 -07002150 &priv->phandle, priv->pclient, "core_clk");
Alan Kwong67a3f792016-11-01 23:16:53 -04002151 if (rc) {
2152 SDE_ERROR("failed to init perf %d\n", rc);
2153 goto perf_err;
2154 }
2155
Abhinav Kumar2316fb92017-01-30 23:07:08 -08002156 sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
2157 if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
2158 rc = PTR_ERR(sde_kms->hw_intr);
2159 SDE_ERROR("hw_intr init failed: %d\n", rc);
2160 sde_kms->hw_intr = NULL;
2161 goto hw_intr_init_err;
2162 }
2163
Clarence Ip4ce59322016-06-26 22:27:51 -04002164 /*
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002165 * _sde_kms_drm_obj_init should create the DRM related objects
2166 * i.e. CRTCs, planes, encoders, connectors and so forth
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002167 */
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002168 rc = _sde_kms_drm_obj_init(sde_kms);
2169 if (rc) {
2170 SDE_ERROR("modeset init failed: %d\n", rc);
Alan Kwong67a3f792016-11-01 23:16:53 -04002171 goto drm_obj_init_err;
Lloyd Atkinson1e2497e2016-09-26 17:55:48 -04002172 }
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002173
2174 dev->mode_config.min_width = 0;
2175 dev->mode_config.min_height = 0;
2176
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002177 /*
Dhaval Patel4e574842016-08-23 15:11:37 -07002178 * max crtc width is equal to the max mixer width * 2 and max height is
2179 * is 4K
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002180 */
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002181 dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002182 dev->mode_config.max_height = 4096;
2183
Lloyd Atkinsonfa2489c2016-05-25 15:16:03 -04002184 /*
2185 * Support format modifiers for compression etc.
2186 */
2187 dev->mode_config.allow_fb_modifiers = true;
2188
Clarence Ip7f0de632017-05-31 14:59:14 -04002189 /*
2190 * Handle (re)initializations during power enable
2191 */
2192 sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
2193 sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
2194 SDE_POWER_EVENT_POST_ENABLE,
2195 sde_kms_handle_power_event, sde_kms, "kms");
2196
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002197 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
Clarence Ip17162b52016-11-24 17:06:29 -05002198 return 0;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002199
Alan Kwong67a3f792016-11-01 23:16:53 -04002200drm_obj_init_err:
2201 sde_core_perf_destroy(&sde_kms->perf);
Abhinav Kumar2316fb92017-01-30 23:07:08 -08002202hw_intr_init_err:
Alan Kwong67a3f792016-11-01 23:16:53 -04002203perf_err:
Clarence Ip17162b52016-11-24 17:06:29 -05002204power_error:
Dhaval Patel3949f032016-06-20 16:24:33 -07002205 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
Clarence Ip17162b52016-11-24 17:06:29 -05002206error:
Lloyd Atkinson1a0c9172016-10-04 10:01:24 -04002207 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
Dhaval Patel3949f032016-06-20 16:24:33 -07002208end:
Clarence Ip17162b52016-11-24 17:06:29 -05002209 return rc;
2210}
2211
2212struct msm_kms *sde_kms_init(struct drm_device *dev)
2213{
2214 struct msm_drm_private *priv;
2215 struct sde_kms *sde_kms;
2216
2217 if (!dev || !dev->dev_private) {
2218 SDE_ERROR("drm device node invalid\n");
2219 return ERR_PTR(-EINVAL);
2220 }
2221
2222 priv = dev->dev_private;
2223
2224 sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
2225 if (!sde_kms) {
2226 SDE_ERROR("failed to allocate sde kms\n");
2227 return ERR_PTR(-ENOMEM);
2228 }
2229
2230 msm_kms_init(&sde_kms->base, &kms_funcs);
2231 sde_kms->dev = dev;
2232
2233 return &sde_kms->base;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002234}
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002235
2236static int _sde_kms_register_events(struct msm_kms *kms,
2237 struct drm_mode_object *obj, u32 event, bool en)
2238{
2239 int ret = 0;
2240 struct drm_crtc *crtc = NULL;
2241 struct drm_connector *conn = NULL;
2242 struct sde_kms *sde_kms = NULL;
2243
2244 if (!kms || !obj) {
2245 SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
2246 return -EINVAL;
2247 }
2248
2249 sde_kms = to_sde_kms(kms);
2250 switch (obj->type) {
2251 case DRM_MODE_OBJECT_CRTC:
2252 crtc = obj_to_crtc(obj);
2253 ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
2254 break;
2255 case DRM_MODE_OBJECT_CONNECTOR:
2256 conn = obj_to_connector(obj);
2257 ret = sde_connector_register_custom_event(sde_kms, conn, event,
2258 en);
2259 break;
2260 }
2261
2262 return ret;
2263}