blob: 6cee9cfaaa560b575ee08668d90da1266a4a704f [file] [log] [blame]
Rob Clarkc8afe682013-06-26 12:44:06 -04001/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRV_H__
19#define __MSM_DRV_H__
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/cpufreq.h>
24#include <linux/module.h>
Rob Clark060530f2014-03-03 14:19:12 -050025#include <linux/component.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040026#include <linux/platform_device.h>
27#include <linux/pm.h>
28#include <linux/pm_runtime.h>
29#include <linux/slab.h>
30#include <linux/list.h>
31#include <linux/iommu.h>
32#include <linux/types.h>
Archit Taneja3d6df062015-06-09 14:17:22 +053033#include <linux/of_graph.h>
Archit Tanejae9fbdaf2015-11-18 12:15:14 +053034#include <linux/of_device.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040035#include <asm/sizes.h>
36
Rob Clarkc8afe682013-06-26 12:44:06 -040037#include <drm/drmP.h>
Rob Clarkcf3a7e42014-11-08 13:21:06 -050038#include <drm/drm_atomic.h>
39#include <drm/drm_atomic_helper.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040040#include <drm/drm_crtc_helper.h>
Rob Clarkcf3a7e42014-11-08 13:21:06 -050041#include <drm/drm_plane_helper.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040042#include <drm/drm_fb_helper.h>
Rob Clark7198e6b2013-07-19 12:59:32 -040043#include <drm/msm_drm.h>
Daniel Vetterd9fc9412014-09-23 15:46:53 +020044#include <drm/drm_gem.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040045
46struct msm_kms;
Rob Clark7198e6b2013-07-19 12:59:32 -040047struct msm_gpu;
Rob Clark871d8122013-11-16 12:56:06 -050048struct msm_mmu;
Archit Taneja990a4002016-05-07 23:11:25 +053049struct msm_mdss;
Rob Clarka7d3c952014-05-30 14:47:38 -040050struct msm_rd_state;
Rob Clark70c70f02014-05-30 14:49:43 -040051struct msm_perf_state;
Rob Clarka7d3c952014-05-30 14:47:38 -040052struct msm_gem_submit;
Rob Clarkca762a82016-03-15 17:22:13 -040053struct msm_fence_context;
Rob Clarkfde5de62016-03-15 15:35:08 -040054struct msm_fence_cb;
Rob Clark667ce332016-09-28 19:58:32 -040055struct msm_gem_address_space;
56struct msm_gem_vma;
Rob Clarkc8afe682013-06-26 12:44:06 -040057
Rob Clark7198e6b2013-07-19 12:59:32 -040058#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
59
/* Per-open-file (struct drm_file) driver private state: */
struct msm_file_private {
	/* currently we don't do anything useful with this.. but when
	 * per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	int dummy;	/* placeholder so the struct is not empty */
};
Rob Clarkc8afe682013-06-26 12:44:06 -040067
/* MDP plane property ids; used as indices into
 * msm_drm_private::plane_property[]:
 */
enum msm_mdp_plane_property {
	PLANE_PROP_ZPOS,
	PLANE_PROP_ALPHA,
	PLANE_PROP_PREMULTIPLIED,
	PLANE_PROP_MAX_NUM	/* count of properties, not a real property */
};
74
/* Deferred vblank enable/disable control: requests are queued on
 * event_list and drained from the work item (NOTE(review): presumably
 * deferred to a workqueue because the hw enable path needs a sleepable
 * context — confirm against the users in msm_drv.c):
 */
struct msm_vblank_ctrl {
	struct work_struct work;	/* drains event_list */
	struct list_head event_list;	/* pending vblank on/off requests */
	spinlock_t lock;		/* protects event_list (assumed — verify) */
};
80
/*
 * Top-level driver-private state for one msm drm_device (display +
 * optional gpu).  NOTE(review): presumably hung off drm_device's
 * dev_private — confirm in msm_drv.c.
 */
struct msm_drm_private {

	struct drm_device *dev;		/* the drm device we belong to */

	struct msm_kms *kms;		/* modeset/display pipeline state */

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* top level MDSS wrapper device (for MDP5 only) */
	struct msm_mdss *mdss;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* eDP is for mdp5 only, but kms has not been created
	 * when edp_bind() and edp_init() are called. Here is the only
	 * place to keep the edp instance.
	 */
	struct msm_edp *edp;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;
	/* last context to submit — NOTE(review): confirm use in submit path */
	struct msm_file_private *lastctx;

	struct drm_fb_helper *fbdev;	/* fbdev emulation, see msm_fbdev_init() */

	/* debugfs state, see msm_rd_debugfs_init()/msm_perf_debugfs_init(): */
	struct msm_rd_state *rd;
	struct msm_perf_state *perf;

	/* list of GEM objects: */
	struct list_head inactive_list;

	struct workqueue_struct *wq;		/* driver workqueue */
	struct workqueue_struct *atomic_wq;	/* for async atomic commits (assumed) */

	/* crtcs pending async atomic updates: */
	uint32_t pending_crtcs;
	wait_queue_head_t pending_crtcs_event;

	/* Registered address spaces.. currently this is fixed per # of
	 * iommu's.  Ie. one for display block and one for gpu block.
	 * Eventually, to do per-process gpu pagetables, we'll want one
	 * of these per-process.
	 */
	unsigned int num_aspaces;
	struct msm_gem_address_space *aspace[NUM_DOMAINS];

	/* display pipeline objects; num_* counts the used entries of
	 * each fixed-size array:
	 */
	unsigned int num_planes;
	struct drm_plane *planes[16];

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[8];

	unsigned int num_encoders;
	struct drm_encoder *encoders[8];

	unsigned int num_bridges;
	struct drm_bridge *bridges[8];

	unsigned int num_connectors;
	struct drm_connector *connectors[8];

	/* Properties */
	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
	} vram;

	/* GEM shrinker, see msm_gem_shrinker_init(): */
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	struct msm_vblank_ctrl vblank_ctrl;	/* deferred vblank on/off */

	/* task holding struct_mutex.. currently only used in submit path
	 * to detect and reject faults from copy_from_user() for submit
	 * ioctl.
	 */
	struct task_struct *struct_mutex_task;
};
173
/* msm-specific per-format info, see msm_framebuffer_format(): */
struct msm_format {
	uint32_t pixel_format;	/* drm fourcc code (assumed — verify) */
};
177
Daniel Vetterb4274fb2014-11-26 17:02:18 +0100178int msm_atomic_check(struct drm_device *dev,
179 struct drm_atomic_state *state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500180int msm_atomic_commit(struct drm_device *dev,
Maarten Lankhorsta3ccfb92016-04-26 16:11:38 +0200181 struct drm_atomic_state *state, bool nonblock);
Rob Clark870d7382016-11-04 13:51:42 -0400182struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
183void msm_atomic_state_clear(struct drm_atomic_state *state);
184void msm_atomic_state_free(struct drm_atomic_state *state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500185
Rob Clark667ce332016-09-28 19:58:32 -0400186int msm_register_address_space(struct drm_device *dev,
187 struct msm_gem_address_space *aspace);
188
189void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
190 struct msm_gem_vma *vma, struct sg_table *sgt);
191int msm_gem_map_vma(struct msm_gem_address_space *aspace,
192 struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
193
194void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
195struct msm_gem_address_space *
196msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
197 const char *name);
Rob Clarkc8afe682013-06-26 12:44:06 -0400198
Rob Clark40e68152016-05-03 09:50:26 -0400199void msm_gem_submit_free(struct msm_gem_submit *submit);
Rob Clark7198e6b2013-07-19 12:59:32 -0400200int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
201 struct drm_file *file);
202
Rob Clark68209392016-05-17 16:19:32 -0400203void msm_gem_shrinker_init(struct drm_device *dev);
204void msm_gem_shrinker_cleanup(struct drm_device *dev);
205
Daniel Thompson77a147e2014-11-12 11:38:14 +0000206int msm_gem_mmap_obj(struct drm_gem_object *obj,
207 struct vm_area_struct *vma);
Rob Clarkc8afe682013-06-26 12:44:06 -0400208int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
209int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
210uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
211int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
212 uint32_t *iova);
213int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
Rob Clark2638d902014-11-08 09:13:37 -0500214uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
Rob Clark05b84912013-09-28 11:28:35 -0400215struct page **msm_gem_get_pages(struct drm_gem_object *obj);
216void msm_gem_put_pages(struct drm_gem_object *obj);
Rob Clarkc8afe682013-06-26 12:44:06 -0400217void msm_gem_put_iova(struct drm_gem_object *obj, int id);
218int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
219 struct drm_mode_create_dumb *args);
Rob Clarkc8afe682013-06-26 12:44:06 -0400220int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
221 uint32_t handle, uint64_t *offset);
Rob Clark05b84912013-09-28 11:28:35 -0400222struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
223void *msm_gem_prime_vmap(struct drm_gem_object *obj);
224void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
Daniel Thompson77a147e2014-11-12 11:38:14 +0000225int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
Rob Clark05b84912013-09-28 11:28:35 -0400226struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
Maarten Lankhorstb5e9c1a2014-01-09 11:03:14 +0100227 struct dma_buf_attachment *attach, struct sg_table *sg);
Rob Clark05b84912013-09-28 11:28:35 -0400228int msm_gem_prime_pin(struct drm_gem_object *obj);
229void msm_gem_prime_unpin(struct drm_gem_object *obj);
Rob Clark18f23042016-05-26 16:24:35 -0400230void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
231void *msm_gem_get_vaddr(struct drm_gem_object *obj);
232void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
233void msm_gem_put_vaddr(struct drm_gem_object *obj);
Rob Clark4cd33c42016-05-17 15:44:49 -0400234int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
Rob Clark68209392016-05-17 16:19:32 -0400235void msm_gem_purge(struct drm_gem_object *obj);
Rob Clarke1e9db22016-05-27 11:16:28 -0400236void msm_gem_vunmap(struct drm_gem_object *obj);
Rob Clarkb6295f92016-03-15 18:26:28 -0400237int msm_gem_sync_object(struct drm_gem_object *obj,
238 struct msm_fence_context *fctx, bool exclusive);
Rob Clark7198e6b2013-07-19 12:59:32 -0400239void msm_gem_move_to_active(struct drm_gem_object *obj,
Chris Wilsonf54d1862016-10-25 13:00:45 +0100240 struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
Rob Clark7198e6b2013-07-19 12:59:32 -0400241void msm_gem_move_to_inactive(struct drm_gem_object *obj);
Rob Clarkba00c3f2016-03-16 18:18:17 -0400242int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
Rob Clark7198e6b2013-07-19 12:59:32 -0400243int msm_gem_cpu_fini(struct drm_gem_object *obj);
Rob Clarkc8afe682013-06-26 12:44:06 -0400244void msm_gem_free_object(struct drm_gem_object *obj);
245int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
246 uint32_t size, uint32_t flags, uint32_t *handle);
247struct drm_gem_object *msm_gem_new(struct drm_device *dev,
248 uint32_t size, uint32_t flags);
Rob Clark05b84912013-09-28 11:28:35 -0400249struct drm_gem_object *msm_gem_import(struct drm_device *dev,
Rob Clark79f0e202016-03-16 12:40:35 -0400250 struct dma_buf *dmabuf, struct sg_table *sgt);
Rob Clarkc8afe682013-06-26 12:44:06 -0400251
Rob Clark2638d902014-11-08 09:13:37 -0500252int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
253void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
254uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
Rob Clarkc8afe682013-06-26 12:44:06 -0400255struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
256const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
257struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
Ville Syrjälä1eb83452015-11-11 19:11:29 +0200258 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
Rob Clarkc8afe682013-06-26 12:44:06 -0400259struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
Ville Syrjälä1eb83452015-11-11 19:11:29 +0200260 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
Rob Clarkc8afe682013-06-26 12:44:06 -0400261
262struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
Archit Taneja1aaa57f2016-02-25 11:19:45 +0530263void msm_fbdev_free(struct drm_device *dev);
Rob Clarkc8afe682013-06-26 12:44:06 -0400264
Rob Clarkdada25b2013-12-01 12:12:54 -0500265struct hdmi;
Arnd Bergmannfcda50c2016-02-22 22:08:35 +0100266int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
Rob Clark067fef32014-11-04 13:33:14 -0500267 struct drm_encoder *encoder);
Arnd Bergmannfcda50c2016-02-22 22:08:35 +0100268void __init msm_hdmi_register(void);
269void __exit msm_hdmi_unregister(void);
Rob Clarkc8afe682013-06-26 12:44:06 -0400270
Hai Li00453982014-12-12 14:41:17 -0500271struct msm_edp;
272void __init msm_edp_register(void);
273void __exit msm_edp_unregister(void);
274int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
275 struct drm_encoder *encoder);
276
Hai Lia6895542015-03-31 14:36:33 -0400277struct msm_dsi;
/* One encoder per DSI mode of operation; the encoder array passed to
 * msm_dsi_modeset_init() is indexed by these ids:
 */
enum msm_dsi_encoder_id {
	MSM_DSI_VIDEO_ENCODER_ID = 0,
	MSM_DSI_CMD_ENCODER_ID = 1,
	MSM_DSI_ENCODER_NUM = 2		/* array size, not a real encoder */
};
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
#else
/* Stubs so callers build unchanged when DSI support is compiled out: */
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
/* returns -EINVAL so the caller knows no DSI output was set up */
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
		struct drm_device *dev,
		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
{
	return -EINVAL;
}
#endif
302
Archit Taneja1dd0a0b2016-05-30 16:36:50 +0530303void __init msm_mdp_register(void);
304void __exit msm_mdp_unregister(void);
305
/* debugfs support: */
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct drm_minor *minor);
void msm_rd_dump_submit(struct msm_gem_submit *submit);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct drm_minor *minor);
#else
/* no-op stubs only for the hooks called unconditionally from core code: */
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
#endif
320
321void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
322 const char *dbgname);
323void msm_writel(u32 data, void __iomem *addr);
324u32 msm_readl(const void __iomem *addr);
325
/*
 * Driver debug logging: DBG() logs through DRM_DEBUG_DRIVER(); VERB()
 * is compiled out (if (0)) but still type-checks its arguments.
 *
 * VERB() is wrapped in do { } while (0) so it acts as a single
 * statement: the previous bare "if (0) ..." expansion would steal a
 * following "else" (dangling-else) when used un-braced inside an if.
 */
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) \
		do { if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__); } while (0)
Rob Clarkc8afe682013-06-26 12:44:06 -0400328
/* Compute framebuffer pitch in bytes: bpp rounded up to whole bytes,
 * width padded out to a multiple of 32 pixels (adreno requires
 * 32-pixel pitch alignment):
 */
static inline int align_pitch(int width, int bpp)
{
	int bytes_per_pixel = (bpp + 7) / 8;

	return ALIGN(width, 32) * bytes_per_pixel;
}
335
/* for the generated headers: these must never actually be evaluated on
 * the kernel side (BUG() if they are) — they exist only so the shared
 * register headers compile:
 */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x) ({BUG(); 0;})
#define util_float_to_half(x) ({BUG(); 0;})


/* extract bitfield 'name' from register value 'val', using the
 * generated name__MASK / name__SHIFT constants:
 */
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
346
Rob Clark340ff412016-03-16 14:57:22 -0400347static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
348{
349 ktime_t now = ktime_get();
350 unsigned long remaining_jiffies;
351
352 if (ktime_compare(*timeout, now) < 0) {
353 remaining_jiffies = 0;
354 } else {
355 ktime_t rem = ktime_sub(*timeout, now);
356 struct timespec ts = ktime_to_timespec(rem);
357 remaining_jiffies = timespec_to_jiffies(&ts);
358 }
359
360 return remaining_jiffies;
361}
Rob Clarkc8afe682013-06-26 12:44:06 -0400362
363#endif /* __MSM_DRV_H__ */