Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1 | /* |
Dhaval Patel | 14d46ce | 2017-01-17 16:28:12 -0800 | [diff] [blame] | 2 | * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 3 | * Copyright (C) 2013 Red Hat |
| 4 | * Author: Rob Clark <robdclark@gmail.com> |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License version 2 as published by |
| 8 | * the Free Software Foundation. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but WITHOUT |
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 13 | * more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License along with |
| 16 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 17 | */ |
| 18 | |
| 19 | #ifndef __MSM_DRV_H__ |
| 20 | #define __MSM_DRV_H__ |
| 21 | |
| 22 | #include <linux/kernel.h> |
| 23 | #include <linux/clk.h> |
| 24 | #include <linux/cpufreq.h> |
| 25 | #include <linux/module.h> |
Rob Clark | 060530f | 2014-03-03 14:19:12 -0500 | [diff] [blame] | 26 | #include <linux/component.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/pm.h> |
| 29 | #include <linux/pm_runtime.h> |
| 30 | #include <linux/slab.h> |
| 31 | #include <linux/list.h> |
| 32 | #include <linux/iommu.h> |
| 33 | #include <linux/types.h> |
Archit Taneja | 3d6df06 | 2015-06-09 14:17:22 +0530 | [diff] [blame] | 34 | #include <linux/of_graph.h> |
Archit Taneja | e9fbdaf | 2015-11-18 12:15:14 +0530 | [diff] [blame] | 35 | #include <linux/of_device.h> |
Dhaval Patel | 1ac9103 | 2016-09-26 19:25:39 -0700 | [diff] [blame] | 36 | #include <linux/sde_io_util.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 37 | #include <asm/sizes.h> |
Sandeep Panda | f48c46a | 2016-10-24 09:48:50 +0530 | [diff] [blame] | 38 | #include <linux/kthread.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 39 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 40 | #include <drm/drmP.h> |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 41 | #include <drm/drm_atomic.h> |
| 42 | #include <drm/drm_atomic_helper.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 43 | #include <drm/drm_crtc_helper.h> |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 44 | #include <drm/drm_plane_helper.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 45 | #include <drm/drm_fb_helper.h> |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 46 | #include <drm/msm_drm.h> |
Daniel Vetter | d9fc941 | 2014-09-23 15:46:53 +0200 | [diff] [blame] | 47 | #include <drm/drm_gem.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 48 | |
Dhaval Patel | 3949f03 | 2016-06-20 16:24:33 -0700 | [diff] [blame] | 49 | #include "sde_power_handle.h" |
| 50 | |
/* Field extractors for the MDSS hardware revision word:
 * major in bits [31:28], minor in bits [27:16], step in bits [15:0].
 */
#define GET_MAJOR_REV(rev)		((rev) >> 28)
#define GET_MINOR_REV(rev)		(((rev) >> 16) & 0xFFF)
#define GET_STEP_REV(rev)		((rev) & 0xFFFF)
Lloyd Atkinson | 154b6aa | 2016-05-24 17:11:37 -0400 | [diff] [blame] | 54 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 55 | struct msm_kms; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 56 | struct msm_gpu; |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 57 | struct msm_mmu; |
Archit Taneja | 990a400 | 2016-05-07 23:11:25 +0530 | [diff] [blame] | 58 | struct msm_mdss; |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 59 | struct msm_rd_state; |
Rob Clark | 70c70f0 | 2014-05-30 14:49:43 -0400 | [diff] [blame] | 60 | struct msm_perf_state; |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 61 | struct msm_gem_submit; |
Rob Clark | ca762a8 | 2016-03-15 17:22:13 -0400 | [diff] [blame] | 62 | struct msm_fence_context; |
Rob Clark | fde5de6 | 2016-03-15 15:35:08 -0400 | [diff] [blame] | 63 | struct msm_fence_cb; |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 64 | |
#define NUM_DOMAINS 4    /* one for KMS, then one per gpu core (?) */

/* Upper bounds for the fixed-size DRM object arrays in msm_drm_private */
#define MAX_CRTCS	8
#define MAX_PLANES	12
#define MAX_ENCODERS	8
#define MAX_BRIDGES	8
#define MAX_CONNECTORS	8
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 71 | |
/* Per-open-file driver state, attached to drm_file. */
struct msm_file_private {
	/* currently we don't do anything useful with this.. but when
	 * per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	int dummy;	/* placeholder so the struct is non-empty */
};
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 79 | |
/**
 * enum msm_mdp_plane_property - plane properties exposed to userspace
 *
 * Ordering is significant: blob properties come first, range properties
 * start at PLANE_PROP_BLOBCOUNT, and enum/bitmask properties follow.
 * Property kind is apparently derived from position relative to
 * PLANE_PROP_BLOBCOUNT — confirm against the property install code
 * before reordering.  PLANE_PROP_COUNT is the total.
 */
enum msm_mdp_plane_property {
	/* blob properties, always put these first */
	PLANE_PROP_SCALER_V1,
	PLANE_PROP_SCALER_V2,
	PLANE_PROP_CSC_V1,
	PLANE_PROP_INFO,
	PLANE_PROP_SCALER_LUT_ED,
	PLANE_PROP_SCALER_LUT_CIR,
	PLANE_PROP_SCALER_LUT_SEP,
	PLANE_PROP_SKIN_COLOR,
	PLANE_PROP_SKY_COLOR,
	PLANE_PROP_FOLIAGE_COLOR,

	/* # of blob properties */
	PLANE_PROP_BLOBCOUNT,

	/* range properties */
	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
	PLANE_PROP_ALPHA,
	PLANE_PROP_COLOR_FILL,
	PLANE_PROP_H_DECIMATE,
	PLANE_PROP_V_DECIMATE,
	PLANE_PROP_INPUT_FENCE,
	PLANE_PROP_HUE_ADJUST,
	PLANE_PROP_SATURATION_ADJUST,
	PLANE_PROP_VALUE_ADJUST,
	PLANE_PROP_CONTRAST_ADJUST,
	PLANE_PROP_EXCL_RECT_V1,

	/* enum/bitmask properties */
	PLANE_PROP_ROTATION,
	PLANE_PROP_BLEND_OP,
	PLANE_PROP_SRC_CONFIG,

	/* total # of properties */
	PLANE_PROP_COUNT
};
| 117 | |
/**
 * enum msm_mdp_crtc_property - CRTC properties exposed to userspace
 *
 * Same layout convention as msm_mdp_plane_property: blob properties
 * first, range properties from CRTC_PROP_BLOBCOUNT onward,
 * CRTC_PROP_COUNT is the total.  Do not reorder without checking the
 * property install code.
 */
enum msm_mdp_crtc_property {
	CRTC_PROP_INFO,

	/* # of blob properties */
	CRTC_PROP_BLOBCOUNT,

	/* range properties */
	CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
	CRTC_PROP_OUTPUT_FENCE,
	CRTC_PROP_OUTPUT_FENCE_OFFSET,
	CRTC_PROP_DIM_LAYER_V1,

	/* total # of properties */
	CRTC_PROP_COUNT
};
| 133 | |
/**
 * enum msm_mdp_conn_property - connector properties exposed to userspace
 *
 * Same layout convention as the plane/CRTC property enums: blob
 * properties first, range properties from CONNECTOR_PROP_BLOBCOUNT,
 * then enum/bitmask properties; CONNECTOR_PROP_COUNT is the total.
 */
enum msm_mdp_conn_property {
	/* blob properties, always put these first */
	CONNECTOR_PROP_SDE_INFO,

	/* # of blob properties */
	CONNECTOR_PROP_BLOBCOUNT,

	/* range properties */
	CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
	CONNECTOR_PROP_RETIRE_FENCE,
	CONNECTOR_PROP_DST_X,
	CONNECTOR_PROP_DST_Y,
	CONNECTOR_PROP_DST_W,
	CONNECTOR_PROP_DST_H,

	/* enum/bitmask properties */
	CONNECTOR_PROP_TOPOLOGY_NAME,
	CONNECTOR_PROP_TOPOLOGY_CONTROL,

	/* total # of properties */
	CONNECTOR_PROP_COUNT
};
| 156 | |
/**
 * struct msm_vblank_ctrl - deferred vblank enable/disable bookkeeping
 * @work: kthread work item that processes the queued requests
 * @event_list: list of pending vblank control requests
 * @lock: spinlock — presumably protects @event_list; confirm at use sites
 */
struct msm_vblank_ctrl {
	struct kthread_work work;
	struct list_head event_list;
	spinlock_t lock;
};
| 162 | |
/* max horizontal tiles a single display can be split across */
#define MAX_H_TILES_PER_DISPLAY 2

/**
 * enum msm_display_compression - compression method used for pixel stream
 * @MSM_DISPLAY_COMPRESS_NONE:     Pixel data is not compressed
 * @MSM_DISPLAY_COMPRESS_DSC:      DSC compression is used
 * @MSM_DISPLAY_COMPRESS_FBC:      FBC compression is used
 */
enum msm_display_compression {
	MSM_DISPLAY_COMPRESS_NONE,
	MSM_DISPLAY_COMPRESS_DSC,
	MSM_DISPLAY_COMPRESS_FBC,
};
| 176 | |
/**
 * enum msm_display_caps - features/capabilities supported by displays
 * @MSM_DISPLAY_CAP_VID_MODE:      Video or "active" mode supported
 * @MSM_DISPLAY_CAP_CMD_MODE:      Command mode supported
 * @MSM_DISPLAY_CAP_HOT_PLUG:      Hot plug detection supported
 * @MSM_DISPLAY_CAP_EDID:          EDID supported
 *
 * Bitmask flags, OR'd together in msm_display_info.capabilities.
 */
enum msm_display_caps {
	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
	MSM_DISPLAY_CAP_EDID		= BIT(3),
};
| 190 | |
/**
 * struct msm_display_info - defines display properties
 * @intf_type:          DRM_MODE_CONNECTOR_ display type
 * @capabilities:       Bitmask of display flags (enum msm_display_caps)
 * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
 * @h_tile_instance:    Controller instance used per tile. Number of elements is
 *                      based on num_of_h_tiles
 * @is_connected:       Set to true if display is connected
 * @width_mm:           Physical width
 * @height_mm:          Physical height
 * @max_width:          Max width of display. In case of hot pluggable display
 *                      this is max width supported by controller
 * @max_height:         Max height of display. In case of hot pluggable display
 *                      this is max height supported by controller
 * @compression:        Compression supported by the display
 */
struct msm_display_info {
	int intf_type;
	uint32_t capabilities;

	uint32_t num_of_h_tiles;
	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];

	bool is_connected;

	unsigned int width_mm;
	unsigned int height_mm;

	uint32_t max_width;
	uint32_t max_height;

	enum msm_display_compression compression;
};
| 224 | |
/**
 * struct msm_drm_event - defines custom event notification struct
 * @base: base object required for event notification by DRM framework.
 * @event: event object required for event notification by DRM framework.
 * @data: flexible array holding the response payload for the event.
 *        (A previous revision also documented an @info member that does
 *        not exist in this struct.)
 */
struct msm_drm_event {
	struct drm_pending_event base;
	struct drm_event event;
	u8 data[];
};
Ajay Singh Parmar | 64c1919 | 2016-06-10 16:44:56 -0700 | [diff] [blame] | 238 | |
/**
 * struct msm_drm_commit - per-CRTC commit (display) thread context
 * @dev: back-pointer to the drm device
 * @thread: kthread that runs @worker
 * @crtc_id: id of the crtc this thread serves (index into disp_thread[])
 * @worker: kthread worker that commit work is queued on
 */
struct msm_drm_commit {
	struct drm_device *dev;
	struct task_struct *thread;
	unsigned int crtc_id;
	struct kthread_worker worker;
};
| 246 | |
/*
 * struct msm_drm_private - top-level driver state, hung off drm_device.
 *
 * Holds the KMS implementation, SDE power handles, subordinate display
 * devices (HDMI/eDP/DSI), GPU state, GEM bookkeeping, and the DRM
 * object/property tables.
 */
struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;

	/* SDE power handle and this driver's power client */
	struct sde_power_handle phandle;
	struct sde_power_client *pclient;

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* top level MDSS wrapper device (for MDP5 only) */
	struct msm_mdss *mdss;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* eDP is for mdp5 only, but kms has not been created
	 * when edp_bind() and edp_init() are called. Here is the only
	 * place to keep the edp instance.
	 */
	struct msm_edp *edp;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;
	struct msm_file_private *lastctx;

	struct drm_fb_helper *fbdev;

	/* debugfs state ("rd" cmdstream dump, perf counters) */
	struct msm_rd_state *rd;
	struct msm_perf_state *perf;

	/* list of GEM objects: */
	struct list_head inactive_list;

	struct workqueue_struct *wq;

	/* crtcs pending async atomic updates: */
	uint32_t pending_crtcs;
	wait_queue_head_t pending_crtcs_event;

	/* registered MMUs: */
	unsigned int num_mmus;
	struct msm_mmu *mmus[NUM_DOMAINS];

	unsigned int num_planes;
	struct drm_plane *planes[MAX_PLANES];

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	/* one display commit thread per crtc */
	struct msm_drm_commit disp_thread[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];

	unsigned int num_bridges;
	struct drm_bridge *bridges[MAX_BRIDGES];

	unsigned int num_connectors;
	struct drm_connector *connectors[MAX_CONNECTORS];

	/* Properties, indexed by the msm_mdp_*_property enums */
	struct drm_property *plane_property[PLANE_PROP_COUNT];
	struct drm_property *crtc_property[CRTC_PROP_COUNT];
	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];

	/* Color processing properties for the crtc */
	struct drm_property **cp_property;

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
	} vram;

	/* GEM shrinker / vmap-pressure hooks */
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	struct msm_vblank_ctrl vblank_ctrl;

	/* task holding struct_mutex.. currently only used in submit path
	 * to detect and reject faults from copy_from_user() for submit
	 * ioctl.
	 */
	struct task_struct *struct_mutex_task;

	/* list of clients waiting for events */
	struct list_head client_event_list;
};
| 347 | |
/* driver-private framebuffer format wrapper */
struct msm_format {
	uint32_t pixel_format;	/* presumably a DRM fourcc (DRM_FORMAT_*) — confirm at users */
};
| 351 | |
Daniel Vetter | b4274fb | 2014-11-26 17:02:18 +0100 | [diff] [blame] | 352 | int msm_atomic_check(struct drm_device *dev, |
| 353 | struct drm_atomic_state *state); |
/* callback from wq once fence has passed: */
struct msm_fence_cb {
	struct work_struct work;	/* queued work that invokes @func */
	uint32_t fence;			/* fence value being waited on */
	void (*func)(struct msm_fence_cb *cb);
};

void __msm_fence_worker(struct work_struct *work);

/* Initialize a msm_fence_cb: hook up the shared fence worker and the
 * user callback to run once the fence has passed. */
#define INIT_FENCE_CB(_cb, _func)                 \
	do {                                      \
		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
		(_cb)->func = _func;                         \
	} while (0)
| 367 | |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 368 | int msm_atomic_commit(struct drm_device *dev, |
Maarten Lankhorst | a3ccfb9 | 2016-04-26 16:11:38 +0200 | [diff] [blame] | 369 | struct drm_atomic_state *state, bool nonblock); |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 370 | |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 371 | int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); |
Lloyd Atkinson | 1e2497e | 2016-09-26 17:55:48 -0400 | [diff] [blame] | 372 | void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 373 | |
Rob Clark | 40e6815 | 2016-05-03 09:50:26 -0400 | [diff] [blame] | 374 | void msm_gem_submit_free(struct msm_gem_submit *submit); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 375 | int msm_ioctl_gem_submit(struct drm_device *dev, void *data, |
| 376 | struct drm_file *file); |
| 377 | |
Rob Clark | 6820939 | 2016-05-17 16:19:32 -0400 | [diff] [blame] | 378 | void msm_gem_shrinker_init(struct drm_device *dev); |
| 379 | void msm_gem_shrinker_cleanup(struct drm_device *dev); |
| 380 | |
Daniel Thompson | 77a147e | 2014-11-12 11:38:14 +0000 | [diff] [blame] | 381 | int msm_gem_mmap_obj(struct drm_gem_object *obj, |
| 382 | struct vm_area_struct *vma); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 383 | int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
| 384 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 385 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); |
| 386 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, |
| 387 | uint32_t *iova); |
| 388 | int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); |
Rob Clark | 2638d90 | 2014-11-08 09:13:37 -0500 | [diff] [blame] | 389 | uint32_t msm_gem_iova(struct drm_gem_object *obj, int id); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 390 | struct page **msm_gem_get_pages(struct drm_gem_object *obj); |
| 391 | void msm_gem_put_pages(struct drm_gem_object *obj); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 392 | void msm_gem_put_iova(struct drm_gem_object *obj, int id); |
| 393 | int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, |
| 394 | struct drm_mode_create_dumb *args); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 395 | int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
| 396 | uint32_t handle, uint64_t *offset); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 397 | struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); |
| 398 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); |
| 399 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
Daniel Thompson | 77a147e | 2014-11-12 11:38:14 +0000 | [diff] [blame] | 400 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 401 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, |
Maarten Lankhorst | b5e9c1a | 2014-01-09 11:03:14 +0100 | [diff] [blame] | 402 | struct dma_buf_attachment *attach, struct sg_table *sg); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 403 | int msm_gem_prime_pin(struct drm_gem_object *obj); |
| 404 | void msm_gem_prime_unpin(struct drm_gem_object *obj); |
Rob Clark | 18f2304 | 2016-05-26 16:24:35 -0400 | [diff] [blame] | 405 | void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); |
| 406 | void *msm_gem_get_vaddr(struct drm_gem_object *obj); |
| 407 | void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); |
| 408 | void msm_gem_put_vaddr(struct drm_gem_object *obj); |
Rob Clark | 4cd33c4 | 2016-05-17 15:44:49 -0400 | [diff] [blame] | 409 | int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); |
Rob Clark | 6820939 | 2016-05-17 16:19:32 -0400 | [diff] [blame] | 410 | void msm_gem_purge(struct drm_gem_object *obj); |
Rob Clark | e1e9db2 | 2016-05-27 11:16:28 -0400 | [diff] [blame] | 411 | void msm_gem_vunmap(struct drm_gem_object *obj); |
Rob Clark | b6295f9 | 2016-03-15 18:26:28 -0400 | [diff] [blame] | 412 | int msm_gem_sync_object(struct drm_gem_object *obj, |
| 413 | struct msm_fence_context *fctx, bool exclusive); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 414 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
Rob Clark | b6295f9 | 2016-03-15 18:26:28 -0400 | [diff] [blame] | 415 | struct msm_gpu *gpu, bool exclusive, struct fence *fence); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 416 | void msm_gem_move_to_inactive(struct drm_gem_object *obj); |
Rob Clark | ba00c3f | 2016-03-16 18:18:17 -0400 | [diff] [blame] | 417 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 418 | int msm_gem_cpu_fini(struct drm_gem_object *obj); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 419 | void msm_gem_free_object(struct drm_gem_object *obj); |
| 420 | int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, |
| 421 | uint32_t size, uint32_t flags, uint32_t *handle); |
| 422 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
| 423 | uint32_t size, uint32_t flags); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 424 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
Rob Clark | 79f0e20 | 2016-03-16 12:40:35 -0400 | [diff] [blame] | 425 | struct dma_buf *dmabuf, struct sg_table *sgt); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 426 | |
Rob Clark | 2638d90 | 2014-11-08 09:13:37 -0500 | [diff] [blame] | 427 | int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id); |
| 428 | void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id); |
| 429 | uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 430 | struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); |
| 431 | const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); |
| 432 | struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, |
Ville Syrjälä | 1eb8345 | 2015-11-11 19:11:29 +0200 | [diff] [blame] | 433 | const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 434 | struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, |
Ville Syrjälä | 1eb8345 | 2015-11-11 19:11:29 +0200 | [diff] [blame] | 435 | struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 436 | |
| 437 | struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); |
Archit Taneja | 1aaa57f | 2016-02-25 11:19:45 +0530 | [diff] [blame] | 438 | void msm_fbdev_free(struct drm_device *dev); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 439 | |
Rob Clark | dada25b | 2013-12-01 12:12:54 -0500 | [diff] [blame] | 440 | struct hdmi; |
Arnd Bergmann | fcda50c | 2016-02-22 22:08:35 +0100 | [diff] [blame] | 441 | int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, |
Rob Clark | 067fef3 | 2014-11-04 13:33:14 -0500 | [diff] [blame] | 442 | struct drm_encoder *encoder); |
Arnd Bergmann | fcda50c | 2016-02-22 22:08:35 +0100 | [diff] [blame] | 443 | void __init msm_hdmi_register(void); |
| 444 | void __exit msm_hdmi_unregister(void); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 445 | |
Hai Li | 0045398 | 2014-12-12 14:41:17 -0500 | [diff] [blame] | 446 | struct msm_edp; |
| 447 | void __init msm_edp_register(void); |
| 448 | void __exit msm_edp_unregister(void); |
| 449 | int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, |
| 450 | struct drm_encoder *encoder); |
| 451 | |
Hai Li | a689554 | 2015-03-31 14:36:33 -0400 | [diff] [blame] | 452 | struct msm_dsi; |
/* DSI drives two encoders: one for video mode, one for command mode */
enum msm_dsi_encoder_id {
	MSM_DSI_VIDEO_ENCODER_ID = 0,
	MSM_DSI_CMD_ENCODER_ID = 1,
	MSM_DSI_ENCODER_NUM = 2
};
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
#else
/* Stubs so callers need not be conditional on CONFIG_DRM_MSM_DSI */
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
{
	/* DSI support not compiled in */
	return -EINVAL;
}
#endif
| 477 | |
Archit Taneja | 1dd0a0b | 2016-05-30 16:36:50 +0530 | [diff] [blame] | 478 | void __init msm_mdp_register(void); |
| 479 | void __exit msm_mdp_unregister(void); |
| 480 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 481 | #ifdef CONFIG_DEBUG_FS |
| 482 | void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); |
| 483 | void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); |
| 484 | void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 485 | int msm_debugfs_late_init(struct drm_device *dev); |
| 486 | int msm_rd_debugfs_init(struct drm_minor *minor); |
| 487 | void msm_rd_debugfs_cleanup(struct drm_minor *minor); |
| 488 | void msm_rd_dump_submit(struct msm_gem_submit *submit); |
Rob Clark | 70c70f0 | 2014-05-30 14:49:43 -0400 | [diff] [blame] | 489 | int msm_perf_debugfs_init(struct drm_minor *minor); |
| 490 | void msm_perf_debugfs_cleanup(struct drm_minor *minor); |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 491 | #else |
| 492 | static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } |
| 493 | static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 494 | #endif |
| 495 | |
| 496 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
| 497 | const char *dbgname); |
Lloyd Atkinson | 1a0c917 | 2016-10-04 10:01:24 -0400 | [diff] [blame] | 498 | void msm_iounmap(struct platform_device *dev, void __iomem *addr); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 499 | void msm_writel(u32 data, void __iomem *addr); |
| 500 | u32 msm_readl(const void __iomem *addr); |
| 501 | |
/* Debug logging helpers around DRM_DEBUG; a trailing newline is appended
 * automatically.  VERB type-checks its format/arguments but never logs;
 * the do-while wrapper avoids the dangling-else hazard that a bare
 * "if (0)" statement macro creates (an "else" after "VERB(...);" would
 * otherwise bind to the hidden if).
 */
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
| 504 | |
/*
 * Compute the framebuffer pitch in bytes for a line of @width pixels at
 * @bpp bits per pixel.
 */
static inline int align_pitch(int width, int bpp)
{
	/* adreno needs pitch aligned to 32 pixels: */
	int aligned_width = ALIGN(width, 32);

	/* bits-per-pixel rounded up to whole bytes */
	return aligned_width * ((bpp + 7) / 8);
}
| 511 | |
/* for the generated headers: stubs that BUG() if ever evaluated */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x)                ({BUG(); 0;})
#define util_float_to_half(x) ({BUG(); 0;})
| 516 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 517 | |
/* extract a named bitfield via the generated name##__MASK/__SHIFT pair */
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
| 522 | |
Rob Clark | 340ff41 | 2016-03-16 14:57:22 -0400 | [diff] [blame] | 523 | static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) |
| 524 | { |
| 525 | ktime_t now = ktime_get(); |
| 526 | unsigned long remaining_jiffies; |
| 527 | |
| 528 | if (ktime_compare(*timeout, now) < 0) { |
| 529 | remaining_jiffies = 0; |
| 530 | } else { |
| 531 | ktime_t rem = ktime_sub(*timeout, now); |
| 532 | struct timespec ts = ktime_to_timespec(rem); |
| 533 | remaining_jiffies = timespec_to_jiffies(&ts); |
| 534 | } |
| 535 | |
| 536 | return remaining_jiffies; |
| 537 | } |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 538 | |
| 539 | #endif /* __MSM_DRV_H__ */ |