/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __MSM_DRV_H__
#define __MSM_DRV_H__

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/mdss_io_util.h>
#include <asm/sizes.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>

#include "msm_evtlog.h"
#include "sde_power_handle.h"

#define GET_MAJOR_REV(rev) ((rev) >> 28)
#define GET_MINOR_REV(rev) (((rev) >> 16) & 0xFFF)
#define GET_STEP_REV(rev) ((rev) & 0xFFFF)
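/*
 * Illustrative decode only (field layout inferred from the shifts/masks
 * above): a revision word of 0x30020001 gives GET_MAJOR_REV() = 0x3
 * (bits 31:28), GET_MINOR_REV() = 0x002 (bits 27:16) and
 * GET_STEP_REV() = 0x0001 (bits 15:0).
 */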

struct msm_kms;
struct msm_gpu;
struct msm_mmu;
struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;

#define NUM_DOMAINS 4    /* one for KMS, then one per gpu core (?) */
#define MAX_CRTCS 8
#define MAX_PLANES 12
#define MAX_ENCODERS 8
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8

struct msm_file_private {
	/* currently we don't do anything useful with this.. but when
	 * per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	int dummy;
};

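/*
 * Note on the property enums below: blob properties are listed first and
 * counted by the *_PROP_BLOBCOUNT entry; the first non-blob property is
 * aliased to *_PROP_BLOBCOUNT so that blob and non-blob properties share
 * one contiguous index space (used to size the property arrays in
 * struct msm_drm_private).
 */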
enum msm_mdp_plane_property {
	/* blob properties, always put these first */
	PLANE_PROP_SCALER_V1,
	PLANE_PROP_CSC_V1,
	PLANE_PROP_INFO,

	/* # of blob properties */
	PLANE_PROP_BLOBCOUNT,

	/* range properties */
	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
	PLANE_PROP_ALPHA,
	PLANE_PROP_COLOR_FILL,
	PLANE_PROP_H_DECIMATE,
	PLANE_PROP_V_DECIMATE,
	PLANE_PROP_INPUT_FENCE,

	/* enum/bitmask properties */
	PLANE_PROP_ROTATION,
	PLANE_PROP_BLEND_OP,
	PLANE_PROP_SRC_CONFIG,

	/* total # of properties */
	PLANE_PROP_COUNT
};

enum msm_mdp_crtc_property {
	CRTC_PROP_INFO,

	/* # of blob properties */
	CRTC_PROP_BLOBCOUNT,

	/* range properties */
	CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
	CRTC_PROP_OUTPUT_FENCE,
	CRTC_PROP_OUTPUT_FENCE_OFFSET,

	/* total # of properties */
	CRTC_PROP_COUNT
};

enum msm_mdp_conn_property {
	/* blob properties, always put these first */
	CONNECTOR_PROP_SDE_INFO,

	/* # of blob properties */
	CONNECTOR_PROP_BLOBCOUNT,

	/* range properties */
	CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
	CONNECTOR_PROP_RETIRE_FENCE,
	CONNECTOR_PROP_DST_X,
	CONNECTOR_PROP_DST_Y,
	CONNECTOR_PROP_DST_W,
	CONNECTOR_PROP_DST_H,

	/* enum/bitmask properties */
	CONNECTOR_PROP_TOPOLOGY_NAME,
	CONNECTOR_PROP_TOPOLOGY_CONTROL,

	/* total # of properties */
	CONNECTOR_PROP_COUNT
};

struct msm_vblank_ctrl {
	struct work_struct work;
	struct list_head event_list;
	spinlock_t lock;
};

#define MAX_H_TILES_PER_DISPLAY 2

/**
 * enum msm_display_compression - compression method used for pixel stream
 * @MSM_DISPLAY_COMPRESS_NONE: Pixel data is not compressed
 * @MSM_DISPLAY_COMPRESS_DSC: DSC compression is used
 * @MSM_DISPLAY_COMPRESS_FBC: FBC compression is used
 */
enum msm_display_compression {
	MSM_DISPLAY_COMPRESS_NONE,
	MSM_DISPLAY_COMPRESS_DSC,
	MSM_DISPLAY_COMPRESS_FBC,
};

/**
 * enum msm_display_caps - features/capabilities supported by displays
 * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
 * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
 * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
 * @MSM_DISPLAY_CAP_EDID: EDID supported
 */
enum msm_display_caps {
	MSM_DISPLAY_CAP_VID_MODE = BIT(0),
	MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
	MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
	MSM_DISPLAY_CAP_EDID = BIT(3),
};

/**
 * struct msm_display_info - defines display properties
 * @intf_type: DRM_MODE_CONNECTOR_ display type
 * @capabilities: Bitmask of display flags
 * @num_of_h_tiles: Number of horizontal tiles in case of split interface
 * @h_tile_instance: Controller instance used per tile. Number of elements is
 *		based on num_of_h_tiles
 * @is_connected: Set to true if display is connected
 * @width_mm: Physical width
 * @height_mm: Physical height
 * @max_width: Max width of display. In case of hot pluggable display
 *		this is max width supported by controller
 * @max_height: Max height of display. In case of hot pluggable display
 *		this is max height supported by controller
 * @compression: Compression supported by the display
 */
struct msm_display_info {
	int intf_type;
	uint32_t capabilities;

	uint32_t num_of_h_tiles;
	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];

	bool is_connected;

	unsigned int width_mm;
	unsigned int height_mm;

	uint32_t max_width;
	uint32_t max_height;

	enum msm_display_compression compression;
};

/**
 * struct msm_drm_event - defines custom event notification struct
 * @base: base object required for event notification by DRM framework.
 * @event: event object required for event notification by DRM framework.
 * @info: contains information of the DRM object for which events have been
 *        requested.
 * @data: memory location which contains response payload for event.
 */
struct msm_drm_event {
	struct drm_pending_event base;
	struct drm_event event;
	struct drm_msm_event_req info;
	u8 data[];
};

struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;

	struct sde_power_handle phandle;
	struct sde_power_client *pclient;

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* top level MDSS wrapper device (for MDP5 only) */
	struct msm_mdss *mdss;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* eDP is for mdp5 only, but kms has not been created
	 * when edp_bind() and edp_init() are called. Here is the only
	 * place to keep the edp instance.
	 */
	struct msm_edp *edp;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;
	struct msm_file_private *lastctx;

	struct drm_fb_helper *fbdev;

	struct msm_rd_state *rd;
	struct msm_perf_state *perf;

	/* list of GEM objects: */
	struct list_head inactive_list;

	struct workqueue_struct *wq;
	struct workqueue_struct *atomic_wq;

	/* crtcs pending async atomic updates: */
	uint32_t pending_crtcs;
	wait_queue_head_t pending_crtcs_event;

	/* registered MMUs: */
	unsigned int num_mmus;
	struct msm_mmu *mmus[NUM_DOMAINS];

	unsigned int num_planes;
	struct drm_plane *planes[MAX_PLANES];

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];

	unsigned int num_bridges;
	struct drm_bridge *bridges[MAX_BRIDGES];

	unsigned int num_connectors;
	struct drm_connector *connectors[MAX_CONNECTORS];

	/* Properties */
	struct drm_property *plane_property[PLANE_PROP_COUNT];
	struct drm_property *crtc_property[CRTC_PROP_COUNT];
	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];

	/* Color processing properties for the crtc */
	struct drm_property **cp_property;

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
	} vram;

	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	struct msm_vblank_ctrl vblank_ctrl;

	/* task holding struct_mutex.. currently only used in submit path
	 * to detect and reject faults from copy_from_user() for submit
	 * ioctl.
	 */
	struct task_struct *struct_mutex_task;

	struct msm_evtlog evtlog;
};

/* Helper macro for accessing msm_drm_private's event log */
#define MSM_EVTMSG(dev, msg, x, y) do { \
		if ((dev) && ((struct drm_device *)(dev))->dev_private) \
			msm_evtlog_sample(&((struct msm_drm_private *) \
				((struct drm_device *) \
				(dev))->dev_private)->evtlog, __func__, \
				(msg), (uint64_t)(x), (uint64_t)(y), \
				__LINE__); \
	} while (0)

/* Helper macro for accessing msm_drm_private's event log */
#define MSM_EVT(dev, x, y) MSM_EVTMSG((dev), 0, (x), (y))
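/*
 * Illustrative usage only (hypothetical call sites): log a pair of values
 * against the calling function/line, e.g. from a crtc callback:
 *
 *	MSM_EVT(crtc->dev, crtc->base.id, enable);
 *	MSM_EVTMSG(dev, "commit done", crtc_id, 0);
 */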

struct msm_format {
	uint32_t pixel_format;
};

int msm_atomic_check(struct drm_device *dev,
		struct drm_atomic_state *state);
/* callback from wq once fence has passed: */
struct msm_fence_cb {
	struct work_struct work;
	uint32_t fence;
	void (*func)(struct msm_fence_cb *cb);
};

void __msm_fence_worker(struct work_struct *work);

#define INIT_FENCE_CB(_cb, _func) do { \
		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
		(_cb)->func = _func; \
	} while (0)
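/*
 * Illustrative only (my_done is a hypothetical callback): a caller embeds a
 * struct msm_fence_cb, initializes it with INIT_FENCE_CB(), and the work
 * item invokes the callback once the associated fence has passed:
 *
 *	static void my_done(struct msm_fence_cb *cb) { ... }
 *	...
 *	INIT_FENCE_CB(&priv->cb, my_done);
 */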

int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock);

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);

void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file);

void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);

int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);

struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);

struct hdmi;
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);

struct msm_edp;
void __init msm_edp_register(void);
void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
		struct drm_encoder *encoder);

struct msm_dsi;
enum msm_dsi_encoder_id {
	MSM_DSI_VIDEO_ENCODER_ID = 0,
	MSM_DSI_CMD_ENCODER_ID = 1,
	MSM_DSI_ENCODER_NUM = 2
};
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
#else
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
		struct drm_device *dev,
		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
{
	return -EINVAL;
}
#endif

void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct drm_minor *minor);
void msm_rd_dump_submit(struct msm_gem_submit *submit);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct drm_minor *minor);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
#endif

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);

#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)

static inline int align_pitch(int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;
	/* adreno needs pitch aligned to 32 pixels: */
	return bytespp * ALIGN(width, 32);
}
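/*
 * Worked example (arithmetic only): width = 1080, bpp = 32 gives
 * bytespp = 4 and ALIGN(1080, 32) = 1088, so the pitch is 4352 bytes.
 */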

/* for the generated headers: */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x) ({BUG(); 0;})
#define util_float_to_half(x) ({BUG(); 0;})


#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
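/*
 * Illustrative expansions (FOO and HFLIP are hypothetical names; FIELD()
 * relies on the FOO__MASK/FOO__SHIFT pairs emitted by the generated headers):
 *	FIELD(val, FOO)    -> ((val & FOO__MASK) >> FOO__SHIFT)
 *	COND(hflip, HFLIP) -> hflip ? HFLIP : 0
 */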

static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
	ktime_t now = ktime_get();
	unsigned long remaining_jiffies;

	if (ktime_compare(*timeout, now) < 0) {
		remaining_jiffies = 0;
	} else {
		ktime_t rem = ktime_sub(*timeout, now);
		struct timespec ts = ktime_to_timespec(rem);
		remaining_jiffies = timespec_to_jiffies(&ts);
	}

	return remaining_jiffies;
}
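/*
 * Illustrative caller pattern (assumed, not from this file): *timeout is an
 * absolute ktime, typically built from a relative value:
 *
 *	ktime_t t = ktime_add_ms(ktime_get(), 50);
 *	ret = wait_event_timeout(wq, cond, timeout_to_jiffies(&t));
 */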

#endif /* __MSM_DRV_H__ */