/*
 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_mmu.h"

#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"

#include "sde_kms.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "sde_hw_vbif.h"
#include "sde_vbif.h"
#include "sde_encoder.h"
#include "sde_plane.h"
#include "sde_crtc.h"
#include "sde_reg_dma.h"

#define CREATE_TRACE_POINTS
#include "sde_trace.h"

static const char * const iommu_ports[] = {
		"mdp_0",
};

/**
 * Controls size of event log buffer. Specified as a power of 2.
 */
#define SDE_EVTLOG_SIZE 1024

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
 */
#define SDE_DEBUGFS_DIR "msm_sde"
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
/**
 * sdecustom - enable certain driver customizations for sde clients
 * Enabling this modifies the standard DRM behavior slightly and assumes
 * that the clients have specific knowledge about the modifications that
 * are involved, so don't enable this unless you know what you're doing.
 *
 * Parts of the driver that are affected by this setting may be located by
 * searching for invocations of the 'sde_is_custom_client()' function.
 *
 * This is enabled by default.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");

static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);

bool sde_is_custom_client(void)
{
	return sdecustom;
}

#ifdef CONFIG_DEBUG_FS
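/**
 * _sde_danger_signal_status - dump danger or safe signal status to debugfs
 * @s: Pointer to sequence file backing the danger_status/safe_status node
 * @danger_status: true to dump the danger signals, false for the safe signals
 *
 * Votes the core power resource on, reads the status through the MDP top
 * block ops and prints one entry per SSPP and WB block. Always returns zero
 * so the debugfs read itself never fails.
 */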
static int _sde_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct sde_kms *kms = (struct sde_kms *)s->private;
	struct msm_drm_private *priv;
	struct sde_danger_safe_status status;
	int i;

	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
		SDE_ERROR("invalid arg(s)\n");
		return 0;
	}

	priv = kms->dev->dev_private;
	memset(&status, 0, sizeof(struct sde_danger_safe_status));

	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	sde_power_resource_enable(&priv->phandle, kms->core_client, false);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	for (i = WB_0; i < WB_MAX; i++)
		seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
				status.wb[i]);
	seq_puts(s, "\n");

	return 0;
}

#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}

static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, true);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);

static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _sde_danger_signal_status(s, false);
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);

static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
{
	debugfs_remove_recursive(sde_kms->debugfs_danger);
	sde_kms->debugfs_danger = NULL;
}

static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
		struct dentry *parent)
{
	sde_kms->debugfs_danger = debugfs_create_dir("danger",
			parent);
	if (!sde_kms->debugfs_danger) {
		SDE_ERROR("failed to create danger debugfs\n");
		return -EINVAL;
	}

	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
			sde_kms, &sde_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
			sde_kms, &sde_debugfs_safe_stats_fops);

	return 0;
}

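/**
 * _sde_debugfs_show_regset32 - print a register block for a debugfs regset
 * @s: Pointer to sequence file
 * @data: unused; the regset is carried in s->private
 *
 * Enables the core clocks, then dumps regset->blk_len bytes of register
 * space starting at regset->offset, sixteen bytes per output line with the
 * register address as a prefix.
 */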
static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct sde_debugfs_regset32 *regset;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	sde_kms = regset->sde_kms;
	if (!sde_kms || !sde_kms->mmio)
		return 0;

	dev = sde_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = sde_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	if (sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, true)) {
		seq_puts(s, "failed to enable sde clocks\n");
		return 0;
	}

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	return 0;
}

static int sde_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations sde_fops_regset32 = {
	.open = sde_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
		uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
{
	if (regset) {
		regset->offset = offset;
		regset->blk_len = length;
		regset->sde_kms = sde_kms;
	}
}

void *sde_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent, struct sde_debugfs_regset32 *regset)
{
	if (!name || !regset || !regset->sde_kms || !regset->blk_len)
		return NULL;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(regset->offset, 4);

	return debugfs_create_file(name, mode, parent,
			regset, &sde_fops_regset32);
}

void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return NULL;

	priv = sde_kms->dev->dev_private;
	return priv->debug_root;
}

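/**
 * _sde_debugfs_init - create the top-level sde debugfs nodes
 * @sde_kms: Pointer to sde kms structure
 *
 * Creates the hw_log_mask entry plus the danger, vbif, core irq and core
 * perf sub-nodes under the drm debug root. Returns zero on success or
 * error code on failure.
 */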
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* create the hw log mask entry under the debugfs root */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0644, debugfs_root, p);

	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	return 0;
}

static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_danger_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}
#else
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
}
#endif

static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return sde_crtc_vblank(crtc, true);
}

static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	sde_crtc_vblank(crtc, false);
}

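/**
 * sde_kms_prepare_commit - callback invoked before an atomic commit starts
 * @kms: Pointer to kms structure
 * @state: Pointer to the atomic state being committed
 *
 * Votes the core power resource on so the hardware stays powered for the
 * duration of the commit; the matching vote-off happens in
 * sde_kms_complete_commit().
 */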
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
}

static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc));
			sde_crtc_commit_kickoff(crtc);
		}
	}
}

static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
		sde_crtc_complete_commit(crtc, old_crtc_state);
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
}

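/**
 * sde_kms_wait_for_commit_done - block until the hardware has latched the
 *	current frame for the given crtc
 * @kms: Pointer to kms structure
 * @crtc: Pointer to crtc being waited on
 *
 * Walks the encoders attached to the crtc and waits for each of them to
 * report commit-done, e.g. the next vsync for video mode panels; this may
 * be a no-op for command mode panels.
 */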
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}
	dev = crtc->dev;

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_commit_done(encoder);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

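/**
 * sde_kms_prepare_fence - prepare output fences for the crtcs in the commit
 * @kms: Pointer to kms structure
 * @old_state: Pointer to the swapped atomic state; the crtc pointers in it
 *	already reference the new state
 *
 * Takes the connection mutex (retrying on ww-mutex deadlock) and runs each
 * crtc's prepare_commit step, which arms its output fences before kickoff.
 */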
static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i, rc;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

retry:
	/* attempt to acquire ww mutex for connection */
	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
			old_state->acquire_ctx);

	if (rc == -EDEADLK) {
		drm_modeset_backoff(old_state->acquire_ctx);
		goto retry;
	}

	/* old_state actually contains updated crtc pointers */
	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
		sde_crtc_prepare_commit(crtc, old_crtc_state);
}

/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}
	return 0;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;
	return rc;
}

/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms: Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
{
	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return;
	}

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;
}

/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *	for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	static const struct sde_connector_ops dsi_ops = {
		.post_init = dsi_conn_post_init,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff
	};
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d", max_encoders);
	}

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(&info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&dsi_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(&info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	return 0;
}

static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}
	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}

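/**
 * _sde_kms_drm_obj_init - create the drm objects for the device
 * @sde_kms: Pointer to sde kms structure
 *
 * Queries the underlying displays, then creates planes (including smart DMA
 * virtual planes), one crtc per encoder, and wires up the possible_crtcs
 * masks. Returns zero on success; on failure all partially created objects
 * are destroyed again.
 */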
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;

	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;

	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
			sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
				(1UL << max_crtc_count) - 1,
				master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
	return ret;
}

/**
 * struct sde_kms_fbo_fb - framebuffer creation list
 * @list: list of framebuffer attached to framebuffer object
 * @fb: Pointer to framebuffer attached to framebuffer object
 */
struct sde_kms_fbo_fb {
	struct list_head list;
	struct drm_framebuffer *fb;
};

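/**
 * sde_kms_fbo_create_fb - create a drm framebuffer backed by the given fbo
 * @dev: Pointer to drm device
 * @fbo: Pointer to framebuffer object providing the backing buffers
 *
 * The new framebuffer takes its own references on the gem objects and is
 * tracked on fbo->fb_list so it can be released when the fbo is destroyed.
 * Returns the framebuffer pointer, or NULL on failure.
 */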
struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
		struct sde_kms_fbo *fbo)
{
	struct drm_framebuffer *fb = NULL;
	struct sde_kms_fbo_fb *fbo_fb;
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	u32 base_offset = 0;
	int i, ret;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return NULL;
	}

	fbo_fb = kzalloc(sizeof(struct sde_kms_fbo_fb), GFP_KERNEL);
	if (!fbo_fb)
		return NULL;

	mode_cmd.pixel_format = fbo->pixel_format;
	mode_cmd.width = fbo->width;
	mode_cmd.height = fbo->height;
	mode_cmd.flags = fbo->flags;

	for (i = 0; i < fbo->nplane; i++) {
		mode_cmd.offsets[i] = base_offset;
		mode_cmd.pitches[i] = fbo->layout.plane_pitch[i];
		mode_cmd.modifier[i] = fbo->modifier[i];
		base_offset += fbo->layout.plane_size[i];
		SDE_DEBUG("offset[%d]:%x\n", i, mode_cmd.offsets[i]);
	}

	fb = msm_framebuffer_init(dev, &mode_cmd, fbo->bo);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		fb = NULL;
		SDE_ERROR("failed to allocate fb %d\n", ret);
		goto fail;
	}

	/* need to take one reference for gem object */
	for (i = 0; i < fbo->nplane; i++)
		drm_gem_object_reference(fbo->bo[i]);

	SDE_DEBUG("register private fb:%d\n", fb->base.id);

	INIT_LIST_HEAD(&fbo_fb->list);
	fbo_fb->fb = fb;
	drm_framebuffer_reference(fbo_fb->fb);
	list_add_tail(&fbo_fb->list, &fbo->fb_list);

	return fb;

fail:
	kfree(fbo_fb);
	return NULL;
}

static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct sde_kms_fbo_fb *curr, *next;
	int i;

	if (!fbo) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	dev = fbo->dev;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}
	priv = dev->dev_private;

	if (!priv->kms) {
		SDE_ERROR("invalid kms handle\n");
		return;
	}
	sde_kms = to_sde_kms(priv->kms);

	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", fbo->width, fbo->height,
			fbo->pixel_format >> 0, fbo->pixel_format >> 8,
			fbo->pixel_format >> 16, fbo->pixel_format >> 24,
			fbo->modifier[0], fbo->flags);

	list_for_each_entry_safe(curr, next, &fbo->fb_list, list) {
		SDE_DEBUG("unregister private fb:%d\n", curr->fb->base.id);
		drm_framebuffer_unregister_private(curr->fb);
		drm_framebuffer_unreference(curr->fb);
		list_del(&curr->list);
		kfree(curr);
	}

	for (i = 0; i < fbo->layout.num_planes; i++) {
		if (fbo->bo[i]) {
			mutex_lock(&dev->struct_mutex);
			drm_gem_object_unreference(fbo->bo[i]);
			mutex_unlock(&dev->struct_mutex);
			fbo->bo[i] = NULL;
		}
	}

	if (fbo->dma_buf) {
		dma_buf_put(fbo->dma_buf);
		fbo->dma_buf = NULL;
	}

	if (sde_kms->iclient && fbo->ihandle) {
		ion_free(sde_kms->iclient, fbo->ihandle);
		fbo->ihandle = NULL;
	}
}

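/**
 * sde_kms_fbo_alloc - allocate a framebuffer object and its backing memory
 * @dev: Pointer to drm device
 * @width: Framebuffer width in pixels
 * @height: Framebuffer height in pixels
 * @pixel_format: DRM fourcc pixel format
 * @modifier: Format modifier array
 * @flags: DRM framebuffer flags, e.g. DRM_MODE_FB_SECURE
 *
 * Backing memory comes from ion when an ion client is available (secure or
 * system heap depending on @flags), otherwise from a new gem buffer.
 * Returns a referenced fbo on success, or NULL on failure.
 */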
struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
		u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct sde_kms_fbo *fbo;
	int i, ret;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("invalid drm device node\n");
		return NULL;
	}
	priv = dev->dev_private;

	if (!priv->kms) {
		SDE_ERROR("invalid kms handle\n");
		return NULL;
	}
	sde_kms = to_sde_kms(priv->kms);

	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", width, height,
			pixel_format >> 0, pixel_format >> 8,
			pixel_format >> 16, pixel_format >> 24,
			modifier[0], flags);

	fbo = kzalloc(sizeof(struct sde_kms_fbo), GFP_KERNEL);
	if (!fbo)
		return NULL;

	atomic_set(&fbo->refcount, 0);
	INIT_LIST_HEAD(&fbo->fb_list);
	fbo->dev = dev;
	fbo->width = width;
	fbo->height = height;
	fbo->pixel_format = pixel_format;
	fbo->flags = flags;
	for (i = 0; i < ARRAY_SIZE(fbo->modifier); i++)
		fbo->modifier[i] = modifier[i];
	fbo->nplane = drm_format_num_planes(fbo->pixel_format);
	fbo->fmt = sde_get_sde_format_ext(fbo->pixel_format, fbo->modifier,
			fbo->nplane);
	if (!fbo->fmt) {
		ret = -EINVAL;
		SDE_ERROR("failed to find pixel format\n");
		goto done;
	}

	ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
			&fbo->layout);
	if (ret) {
		SDE_ERROR("failed to get plane sizes\n");
		goto done;
	}

	/* allocate backing buffer object */
	if (sde_kms->iclient) {
		u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
				ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID) :
				ION_HEAP(ION_SYSTEM_HEAP_ID);

		fbo->ihandle = ion_alloc(sde_kms->iclient,
				fbo->layout.total_size, SZ_4K, heap_id, 0);
		if (IS_ERR_OR_NULL(fbo->ihandle)) {
			SDE_ERROR("failed to alloc ion memory\n");
			ret = PTR_ERR(fbo->ihandle);
			fbo->ihandle = NULL;
			goto done;
		}

		fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
				fbo->ihandle);
		if (IS_ERR(fbo->dma_buf)) {
			SDE_ERROR("failed to share ion memory\n");
			ret = -ENOMEM;
			fbo->dma_buf = NULL;
			goto done;
		}

		fbo->bo[0] = dev->driver->gem_prime_import(dev,
				fbo->dma_buf);
		if (IS_ERR(fbo->bo[0])) {
			SDE_ERROR("failed to import ion memory\n");
			ret = PTR_ERR(fbo->bo[0]);
			fbo->bo[0] = NULL;
			goto done;
		}
	} else {
		mutex_lock(&dev->struct_mutex);
		fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
				MSM_BO_SCANOUT | MSM_BO_WC);
		if (IS_ERR(fbo->bo[0])) {
			mutex_unlock(&dev->struct_mutex);
			SDE_ERROR("failed to new gem buffer\n");
			ret = PTR_ERR(fbo->bo[0]);
			fbo->bo[0] = NULL;
			goto done;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_lock(&dev->struct_mutex);
	for (i = 1; i < fbo->layout.num_planes; i++) {
		fbo->bo[i] = fbo->bo[0];
		drm_gem_object_reference(fbo->bo[i]);
	}
	mutex_unlock(&dev->struct_mutex);

done:
	if (ret) {
		sde_kms_fbo_destroy(fbo);
		kfree(fbo);
		fbo = NULL;
	} else {
		sde_kms_fbo_reference(fbo);
	}

	return fbo;
}

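/**
 * sde_kms_fbo_reference - take an additional reference on a framebuffer object
 * @fbo: Pointer to framebuffer object
 *
 * Returns zero on success. The object is freed by sde_kms_fbo_unreference()
 * once the last reference is dropped.
 */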
int sde_kms_fbo_reference(struct sde_kms_fbo *fbo)
{
	if (!fbo) {
		SDE_ERROR("invalid parameters\n");
		return -EINVAL;
	}

	SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
			atomic_read(&fbo->refcount));

	atomic_inc(&fbo->refcount);

	return 0;
}

void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo)
{
	if (!fbo) {
		SDE_ERROR("invalid parameters\n");
		return;
	}

	SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
			atomic_read(&fbo->refcount));

	if (!atomic_read(&fbo->refcount)) {
		SDE_ERROR("invalid refcount\n");
		return;
	} else if (atomic_dec_return(&fbo->refcount) == 0) {
		sde_kms_fbo_destroy(fbo);
	}
}

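/**
 * sde_kms_postinit - post-initialization once the drm device is up,
 *	currently just the debugfs setup
 * @kms: Pointer to kms structure
 *
 * Returns zero on success or error code on failure.
 */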
static int sde_kms_postinit(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;
	int rc;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;

	rc = _sde_debugfs_init(sde_kms);
	if (rc)
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	return rc;
}

static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

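/**
 * _sde_kms_hw_destroy - release all hardware resources owned by the kms
 * @sde_kms: Pointer to sde kms structure
 * @pdev: Pointer to the platform device used for the io mappings
 *
 * Tears down, roughly in reverse order of sde_kms_hw_init(): interrupts,
 * displays, debugfs, iommus, ion client, vbif blocks, resource manager,
 * catalog, power client and register mappings. Safe to call on a partially
 * initialized kms since every step is NULL-checked.
 */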
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	_sde_kms_release_displays(sde_kms);

	/* safe to call these more than once during shutdown */
	_sde_debugfs_destroy(sde_kms);
	_sde_kms_mmu_destroy(sde_kms);

	if (sde_kms->iclient) {
		ion_client_destroy(sde_kms->iclient);
		sde_kms->iclient = NULL;
	}

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->core_client)
		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
	sde_kms->core_client = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;
}

static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
	kfree(sde_kms);
}

static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.enable_vblank = sde_kms_enable_vblank,
	.disable_vblank = sde_kms_disable_vblank,
	.check_modified_format = sde_format_check_modified_format,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.destroy = sde_kms_destroy,
	.register_events = _sde_kms_register_events,
};

/* the caller needs to turn on the clocks before calling this function */
static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
{
	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
}

static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i;

	for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
		if (!sde_kms->mmu[i])
			continue;

		mmu = sde_kms->mmu[i];
		msm_unregister_mmu(sde_kms->dev, mmu);
		mmu->funcs->detach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		mmu->funcs->destroy(mmu);
		sde_kms->mmu[i] = 0;
		sde_kms->mmu_id[i] = 0;
	}

	return 0;
}

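/**
 * _sde_kms_mmu_init - create and attach the smmu domains used by the driver
 * @sde_kms: Pointer to sde kms structure
 *
 * Walks all MSM_SMMU_DOMAIN_* contexts, attaches each to the iommu ports and
 * registers it with the msm core. Domains that fail to probe are skipped;
 * a registration failure tears down everything created so far.
 */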
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
			mmu->funcs->destroy(mmu);
			continue;
		}

		sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
		if (sde_kms->mmu_id[i] < 0) {
			ret = sde_kms->mmu_id[i];
			SDE_ERROR("failed to register sde iommu %d: %d\n",
					i, ret);
			mmu->funcs->detach(mmu, (const char **)iommu_ports,
					ARRAY_SIZE(iommu_ports));
			goto fail;
		}

		sde_kms->mmu[i] = mmu;
	}

	return 0;
fail:
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}

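/**
 * _sde_kms_ioremap - map a named register region of the platform device
 * @pdev: Pointer to platform device
 * @name: Name of the memory resource, or NULL for the first resource
 * @out_size: Optional pointer returning the size of the mapped region
 *
 * Returns the mapped address on success or an ERR_PTR; a missing resource
 * is reported with a debug message only, since some regions are optional
 * depending on the platform.
 */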
static void __iomem *_sde_kms_ioremap(struct platform_device *pdev,
		const char *name, unsigned long *out_size)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (out_size)
		*out_size = 0;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		/* availability depends on platform */
		SDE_DEBUG("failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		SDE_ERROR("failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SDE_DEBUG("IO:region %s %p %08lx\n", name, ptr, size);

	if (out_size)
		*out_size = size;

	return ptr;
}

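/**
 * sde_kms_hw_init - initialize the hardware-dependent parts of the kms
 * @kms: Pointer to kms structure
 *
 * Maps the mdp, vbif and regdma register regions, enables power, reads the
 * hardware catalog, brings up the resource manager, reg-dma, iommus, perf
 * module and interrupt block, and finally creates the drm objects. All
 * failures unwind through _sde_kms_hw_destroy().
 */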
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->platformdev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}

	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys",
			&sde_kms->mmio_len);
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}
	DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);

	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys",
			&sde_kms->vbif_len[VBIF_RT]);
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}

	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT]);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev,
			"vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]);
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined");
	} else {
		rc = sde_dbg_reg_register_base("vbif_nrt",
				sde_kms->vbif[VBIF_NRT],
				sde_kms->vbif_len[VBIF_NRT]);
		if (rc)
			SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
					rc);
	}

	sde_kms->reg_dma = _sde_kms_ioremap(dev->platformdev, "regdma_phys",
			&sde_kms->reg_dma_len);
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined");
	} else {
		rc = sde_dbg_reg_register_base("reg_dma",
				sde_kms->reg_dma,
				sde_kms->reg_dma_len);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n",
					rc);
	}

	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
		rc = PTR_ERR(sde_kms->core_client);
		if (!sde_kms->core_client)
			rc = -EINVAL;
		SDE_ERROR("sde power client create failed: %d\n", rc);
		sde_kms->core_client = NULL;
		goto error;
	}

	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
			true);
	if (rc) {
		SDE_ERROR("resource enable failed: %d\n", rc);
		goto error;
	}

	_sde_kms_core_hw_rev_init(sde_kms);

	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	/* Initialize reg dma block which is a singleton */
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("failed: reg dma init failed\n");
		goto power_error;
	}

	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	sde_kms->rm_init = true;

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	sde_kms->iclient = msm_ion_client_create(dev->unique);
	if (IS_ERR(sde_kms->iclient)) {
		rc = PTR_ERR(sde_kms->iclient);
		SDE_DEBUG("msm_ion_client not available: %d\n", rc);
		sde_kms->iclient = NULL;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, priv->pclient, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height
	 * is 4K
	 */
	dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		if (!sde_kms->hw_intr)
			rc = -EINVAL;
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
	return 0;

hw_intr_init_err:
	_sde_kms_drm_obj_destroy(sde_kms);
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
perf_err:
power_error:
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
error:
	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
end:
	return rc;
}

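/**
 * sde_kms_init - allocate the sde kms instance for the drm device
 * @dev: Pointer to drm device
 *
 * Only allocates the kms and hooks up its function table here; the heavy
 * lifting is deferred to the hw_init callback.
 */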
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
	if (!sde_kms) {
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);
	}

	msm_kms_init(&sde_kms->base, &kms_funcs);
	sde_kms->dev = dev;

	return &sde_kms->base;
}

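/**
 * _sde_kms_register_events - register or unregister a custom event client
 * @kms: Pointer to kms structure
 * @obj: Mode object (crtc or connector) generating the event
 * @event: Event identifier to (un)register
 * @en: true to register, false to unregister
 *
 * Dispatches to the crtc or connector specific handler based on the object
 * type. Returns zero on success or error code on failure.
 */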
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en)
{
	int ret = 0;
	struct drm_crtc *crtc = NULL;
	struct drm_connector *conn = NULL;
	struct sde_kms *sde_kms = NULL;

	if (!kms || !obj) {
		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	switch (obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		crtc = obj_to_crtc(obj);
		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		conn = obj_to_connector(obj);
		ret = sde_connector_register_custom_event(sde_kms, conn, event,
				en);
		break;
	}

	return ret;
}