Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2013 Red Hat |
| 3 | * Author: Rob Clark <robdclark@gmail.com> |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 as published by |
| 7 | * the Free Software Foundation. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with |
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ |
| 17 | |
| 18 | #ifndef __MSM_DRV_H__ |
| 19 | #define __MSM_DRV_H__ |
| 20 | |
| 21 | #include <linux/kernel.h> |
| 22 | #include <linux/clk.h> |
| 23 | #include <linux/cpufreq.h> |
| 24 | #include <linux/module.h> |
Rob Clark | 060530f | 2014-03-03 14:19:12 -0500 | [diff] [blame] | 25 | #include <linux/component.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 26 | #include <linux/platform_device.h> |
| 27 | #include <linux/pm.h> |
| 28 | #include <linux/pm_runtime.h> |
| 29 | #include <linux/slab.h> |
| 30 | #include <linux/list.h> |
| 31 | #include <linux/iommu.h> |
| 32 | #include <linux/types.h> |
Archit Taneja | 3d6df06 | 2015-06-09 14:17:22 +0530 | [diff] [blame] | 33 | #include <linux/of_graph.h> |
Archit Taneja | e9fbdaf | 2015-11-18 12:15:14 +0530 | [diff] [blame] | 34 | #include <linux/of_device.h> |
Dhaval Patel | 3949f03 | 2016-06-20 16:24:33 -0700 | [diff] [blame] | 35 | #include <linux/mdss_io_util.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 36 | #include <asm/sizes.h> |
| 37 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 38 | #include <drm/drmP.h> |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 39 | #include <drm/drm_atomic.h> |
| 40 | #include <drm/drm_atomic_helper.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 41 | #include <drm/drm_crtc_helper.h> |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 42 | #include <drm/drm_plane_helper.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 43 | #include <drm/drm_fb_helper.h> |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 44 | #include <drm/msm_drm.h> |
Daniel Vetter | d9fc941 | 2014-09-23 15:46:53 +0200 | [diff] [blame] | 45 | #include <drm/drm_gem.h> |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 46 | |
Lloyd Atkinson | 154b6aa | 2016-05-24 17:11:37 -0400 | [diff] [blame] | 47 | #include "msm_evtlog.h" |
Dhaval Patel | 3949f03 | 2016-06-20 16:24:33 -0700 | [diff] [blame] | 48 | #include "sde_power_handle.h" |
| 49 | |
/* Decoders for the MDSS hardware revision register:
 * major in bits [31:28], minor in bits [27:16], step in bits [15:0].
 */
#define GET_MAJOR_REV(rev) ((rev) >> 28)
#define GET_MINOR_REV(rev) (((rev) >> 16) & 0xFFF)
#define GET_STEP_REV(rev) ((rev) & 0xFFFF)
Lloyd Atkinson | 154b6aa | 2016-05-24 17:11:37 -0400 | [diff] [blame] | 53 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 54 | struct msm_kms; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 55 | struct msm_gpu; |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 56 | struct msm_mmu; |
Archit Taneja | 990a400 | 2016-05-07 23:11:25 +0530 | [diff] [blame] | 57 | struct msm_mdss; |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 58 | struct msm_rd_state; |
Rob Clark | 70c70f0 | 2014-05-30 14:49:43 -0400 | [diff] [blame] | 59 | struct msm_perf_state; |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 60 | struct msm_gem_submit; |
Rob Clark | ca762a8 | 2016-03-15 17:22:13 -0400 | [diff] [blame] | 61 | struct msm_fence_context; |
Rob Clark | fde5de6 | 2016-03-15 15:35:08 -0400 | [diff] [blame] | 62 | struct msm_fence_cb; |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 63 | |
#define NUM_DOMAINS 4    /* one for KMS, then one per gpu core (?) */
/* upper bounds for the mode-object arrays in struct msm_drm_private */
#define MAX_CRTCS       8
#define MAX_PLANES      12
#define MAX_ENCODERS    8
#define MAX_BRIDGES     8
#define MAX_CONNECTORS  8
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 70 | |
/* Per-open-file driver state (drm_file->driver_priv). */
struct msm_file_private {
	/* currently we don't do anything useful with this.. but when
	 * per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	int dummy;	/* placeholder so the struct is non-empty */
};
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 78 | |
/* Custom DRM plane properties exposed by this driver.
 * Ordering is significant: blob properties come first so that
 * [0, PLANE_PROP_BLOBCOUNT) can be iterated as the blob range, and
 * PLANE_PROP_COUNT sizes the plane_property[] array in msm_drm_private.
 */
enum msm_mdp_plane_property {
	/* blob properties, always put these first */
	PLANE_PROP_SCALER,
	PLANE_PROP_CSC,
	PLANE_PROP_INFO,

	/* # of blob properties */
	PLANE_PROP_BLOBCOUNT,

	/* range properties */
	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
	PLANE_PROP_ALPHA,
	PLANE_PROP_COLOR_FILL,
	PLANE_PROP_INPUT_FENCE,

	/* enum/bitmask properties */
	PLANE_PROP_ROTATION,
	PLANE_PROP_BLEND_OP,
	PLANE_PROP_SRC_CONFIG,

	/* total # of properties */
	PLANE_PROP_COUNT
};
| 102 | |
/* Custom DRM CRTC properties; same ordering convention as
 * msm_mdp_plane_property (blob count first, then ranges).
 * PLANE_PROP_COUNT-style terminator sizes crtc_property[].
 */
enum msm_mdp_crtc_property {
	/* # of blob properties */
	CRTC_PROP_BLOBCOUNT,

	/* range properties */
	CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
	CRTC_PROP_OUTPUT_FENCE,
	CRTC_PROP_OUTPUT_FENCE_OFFSET,

	/* total # of properties */
	CRTC_PROP_COUNT
};
| 115 | |
/* Custom DRM connector properties; blob properties first, then range,
 * then enum/bitmask.  CONNECTOR_PROP_COUNT sizes conn_property[].
 */
enum msm_mdp_conn_property {
	/* blob properties, always put these first */
	CONNECTOR_PROP_SDE_INFO,

	/* # of blob properties */
	CONNECTOR_PROP_BLOBCOUNT,

	/* range properties */
	CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
	CONNECTOR_PROP_RETIRE_FENCE,
	CONNECTOR_PROP_DST_X,
	CONNECTOR_PROP_DST_Y,
	CONNECTOR_PROP_DST_W,
	CONNECTOR_PROP_DST_H,

	/* enum/bitmask properties */
	CONNECTOR_PROP_TOPOLOGY_NAME,
	CONNECTOR_PROP_TOPOLOGY_CONTROL,

	/* total # of properties */
	CONNECTOR_PROP_COUNT
};
| 138 | |
/* Deferred vblank enable/disable handling. */
struct msm_vblank_ctrl {
	struct work_struct work;	/* worker that drains event_list */
	struct list_head event_list;	/* pending vblank control requests -- presumably guarded by 'lock'; confirm in msm_drv.c */
	spinlock_t lock;
};
| 144 | |
/* maximum number of horizontal tiles a single display can be split into */
#define MAX_H_TILES_PER_DISPLAY 2

/**
 * enum msm_display_compression - compression method used for pixel stream
 * @MSM_DISPLAY_COMPRESS_NONE: Pixel data is not compressed
 * @MSM_DISPLAY_COMPRESS_DSC: DSC compression is used
 * @MSM_DISPLAY_COMPRESS_FBC: FBC compression is used
 */
enum msm_display_compression {
	MSM_DISPLAY_COMPRESS_NONE,
	MSM_DISPLAY_COMPRESS_DSC,
	MSM_DISPLAY_COMPRESS_FBC,
};
| 158 | |
/**
 * enum msm_display_caps - features/capabilities supported by displays
 * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
 * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
 * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
 * @MSM_DISPLAY_CAP_EDID: EDID supported
 *
 * These are bitmask flags, OR'd together into
 * msm_display_info.capabilities.
 */
enum msm_display_caps {
	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
	MSM_DISPLAY_CAP_EDID		= BIT(3),
};
| 172 | |
/**
 * struct msm_display_info - defines display properties
 * @intf_type: DRM_MODE_CONNECTOR_ display type
 * @capabilities: Bitmask of display flags (enum msm_display_caps)
 * @num_of_h_tiles: Number of horizontal tiles in case of split interface
 * @h_tile_instance: Controller instance used per tile. Number of valid
 *		     elements is based on num_of_h_tiles; remaining
 *		     entries are unused
 * @is_connected: Set to true if display is connected
 * @width_mm: Physical width
 * @height_mm: Physical height
 * @max_width: Max width of display. In case of hot pluggable display
 *	       this is max width supported by controller
 * @max_height: Max height of display. In case of hot pluggable display
 *		this is max height supported by controller
 * @compression: Compression supported by the display
 */
struct msm_display_info {
	int intf_type;
	uint32_t capabilities;

	uint32_t num_of_h_tiles;
	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];

	bool is_connected;

	unsigned int width_mm;
	unsigned int height_mm;

	uint32_t max_width;
	uint32_t max_height;

	enum msm_display_compression compression;
};
| 206 | |
Ajay Singh Parmar | 64c1919 | 2016-06-10 16:44:56 -0700 | [diff] [blame] | 207 | struct display_manager; |
| 208 | |
/*
 * Top-level driver state, hung off drm_device->dev_private.  Owns the
 * kms/gpu sub-objects, the display device pointers, GEM bookkeeping,
 * workqueues, and the DRM mode objects plus their custom properties.
 */
struct msm_drm_private {

	struct drm_device *dev;

	/* modeset/kms backend abstraction */
	struct msm_kms *kms;

	/* SDE power handle and the power client registered for it */
	struct sde_power_handle phandle;
	struct sde_power_client *pclient;

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* top level MDSS wrapper device (for MDP5 only) */
	struct msm_mdss *mdss;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* eDP is for mdp5 only, but kms has not been created
	 * when edp_bind() and edp_init() are called. Here is the only
	 * place to keep the edp instance.
	 */
	struct msm_edp *edp;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	/* Display manager for SDE driver */
	struct display_manager *dm;

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;
	/* NOTE(review): presumably the last context that submitted to the
	 * GPU, used to elide redundant context switches -- confirm in the
	 * submit path
	 */
	struct msm_file_private *lastctx;

	/* fbdev emulation state, NULL when disabled */
	struct drm_fb_helper *fbdev;

	/* debugfs "rd" (cmdstream dump) and "perf" state */
	struct msm_rd_state *rd;
	struct msm_perf_state *perf;

	/* list of GEM objects: */
	struct list_head inactive_list;

	/* general-purpose and atomic-commit workqueues */
	struct workqueue_struct *wq;
	struct workqueue_struct *atomic_wq;

	/* crtcs pending async atomic updates: */
	uint32_t pending_crtcs;
	wait_queue_head_t pending_crtcs_event;

	/* registered MMUs: */
	unsigned int num_mmus;
	struct msm_mmu *mmus[NUM_DOMAINS];

	/* mode objects, populated as components bind: */
	unsigned int num_planes;
	struct drm_plane *planes[MAX_PLANES];

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];

	unsigned int num_bridges;
	struct drm_bridge *bridges[MAX_BRIDGES];

	unsigned int num_connectors;
	struct drm_connector *connectors[MAX_CONNECTORS];

	/* Properties (indexed by the msm_mdp_*_property enums above) */
	struct drm_property *plane_property[PLANE_PROP_COUNT];
	struct drm_property *crtc_property[CRTC_PROP_COUNT];
	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
	} vram;

	/* memory-pressure hooks for reclaiming GEM memory */
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	struct msm_vblank_ctrl vblank_ctrl;

	/* task holding struct_mutex.. currently only used in submit path
	 * to detect and reject faults from copy_from_user() for submit
	 * ioctl.
	 */
	struct task_struct *struct_mutex_task;

	/* driver event log, written via MSM_EVT()/MSM_EVTMSG() */
	struct msm_evtlog evtlog;
};
| 307 | |
/* Helper macro for accessing msm_drm_private's event log.  Records
 * (msg, x, y) tagged with the calling function and line number.
 * Safely no-ops when dev or dev->dev_private is NULL.  Note: 'dev' is
 * evaluated more than once, so avoid side effects in that argument.
 */
#define MSM_EVTMSG(dev, msg, x, y) do { \
	if ((dev) && ((struct drm_device *)(dev))->dev_private) \
		msm_evtlog_sample(&((struct msm_drm_private *) \
				((struct drm_device *) \
				(dev))->dev_private)->evtlog, __func__,\
				(msg), (uint64_t)(x), (uint64_t)(y), \
				__LINE__); \
} while (0)

/* Same as MSM_EVTMSG(), with the message argument fixed to 0 */
#define MSM_EVT(dev, x, y) MSM_EVTMSG((dev), 0, (x), (y))
| 320 | |
/* Driver-private per-format info attached to a framebuffer format. */
struct msm_format {
	/* presumably a DRM fourcc (DRM_FORMAT_*) code -- confirm against
	 * msm_framebuffer_format() users
	 */
	uint32_t pixel_format;
};
| 324 | |
Daniel Vetter | b4274fb | 2014-11-26 17:02:18 +0100 | [diff] [blame] | 325 | int msm_atomic_check(struct drm_device *dev, |
| 326 | struct drm_atomic_state *state); |
/* callback from wq once fence has passed: */
struct msm_fence_cb {
	struct work_struct work;	/* queued work that invokes func */
	uint32_t fence;			/* fence value being waited on */
	void (*func)(struct msm_fence_cb *cb);
};

void __msm_fence_worker(struct work_struct *work);

/* Initialize a msm_fence_cb; _func runs from the workqueue once the
 * fence has signalled.
 */
#define INIT_FENCE_CB(_cb, _func) do { \
		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
		(_cb)->func = _func; \
	} while (0)
| 340 | |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 341 | int msm_atomic_commit(struct drm_device *dev, |
Maarten Lankhorst | a3ccfb9 | 2016-04-26 16:11:38 +0200 | [diff] [blame] | 342 | struct drm_atomic_state *state, bool nonblock); |
Rob Clark | cf3a7e4 | 2014-11-08 13:21:06 -0500 | [diff] [blame] | 343 | |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 344 | int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 345 | |
Rob Clark | 40e6815 | 2016-05-03 09:50:26 -0400 | [diff] [blame] | 346 | void msm_gem_submit_free(struct msm_gem_submit *submit); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 347 | int msm_ioctl_gem_submit(struct drm_device *dev, void *data, |
| 348 | struct drm_file *file); |
| 349 | |
Rob Clark | 6820939 | 2016-05-17 16:19:32 -0400 | [diff] [blame] | 350 | void msm_gem_shrinker_init(struct drm_device *dev); |
| 351 | void msm_gem_shrinker_cleanup(struct drm_device *dev); |
| 352 | |
Daniel Thompson | 77a147e | 2014-11-12 11:38:14 +0000 | [diff] [blame] | 353 | int msm_gem_mmap_obj(struct drm_gem_object *obj, |
| 354 | struct vm_area_struct *vma); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 355 | int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
| 356 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 357 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); |
| 358 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, |
| 359 | uint32_t *iova); |
| 360 | int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); |
Rob Clark | 2638d90 | 2014-11-08 09:13:37 -0500 | [diff] [blame] | 361 | uint32_t msm_gem_iova(struct drm_gem_object *obj, int id); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 362 | struct page **msm_gem_get_pages(struct drm_gem_object *obj); |
| 363 | void msm_gem_put_pages(struct drm_gem_object *obj); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 364 | void msm_gem_put_iova(struct drm_gem_object *obj, int id); |
| 365 | int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, |
| 366 | struct drm_mode_create_dumb *args); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 367 | int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
| 368 | uint32_t handle, uint64_t *offset); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 369 | struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); |
| 370 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); |
| 371 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
Daniel Thompson | 77a147e | 2014-11-12 11:38:14 +0000 | [diff] [blame] | 372 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 373 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, |
Maarten Lankhorst | b5e9c1a | 2014-01-09 11:03:14 +0100 | [diff] [blame] | 374 | struct dma_buf_attachment *attach, struct sg_table *sg); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 375 | int msm_gem_prime_pin(struct drm_gem_object *obj); |
| 376 | void msm_gem_prime_unpin(struct drm_gem_object *obj); |
Rob Clark | 18f2304 | 2016-05-26 16:24:35 -0400 | [diff] [blame] | 377 | void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); |
| 378 | void *msm_gem_get_vaddr(struct drm_gem_object *obj); |
| 379 | void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); |
| 380 | void msm_gem_put_vaddr(struct drm_gem_object *obj); |
Rob Clark | 4cd33c4 | 2016-05-17 15:44:49 -0400 | [diff] [blame] | 381 | int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); |
Rob Clark | 6820939 | 2016-05-17 16:19:32 -0400 | [diff] [blame] | 382 | void msm_gem_purge(struct drm_gem_object *obj); |
Rob Clark | e1e9db2 | 2016-05-27 11:16:28 -0400 | [diff] [blame] | 383 | void msm_gem_vunmap(struct drm_gem_object *obj); |
Rob Clark | b6295f9 | 2016-03-15 18:26:28 -0400 | [diff] [blame] | 384 | int msm_gem_sync_object(struct drm_gem_object *obj, |
| 385 | struct msm_fence_context *fctx, bool exclusive); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 386 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
Rob Clark | b6295f9 | 2016-03-15 18:26:28 -0400 | [diff] [blame] | 387 | struct msm_gpu *gpu, bool exclusive, struct fence *fence); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 388 | void msm_gem_move_to_inactive(struct drm_gem_object *obj); |
Rob Clark | ba00c3f | 2016-03-16 18:18:17 -0400 | [diff] [blame] | 389 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 390 | int msm_gem_cpu_fini(struct drm_gem_object *obj); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 391 | void msm_gem_free_object(struct drm_gem_object *obj); |
| 392 | int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, |
| 393 | uint32_t size, uint32_t flags, uint32_t *handle); |
| 394 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
| 395 | uint32_t size, uint32_t flags); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 396 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
Rob Clark | 79f0e20 | 2016-03-16 12:40:35 -0400 | [diff] [blame] | 397 | struct dma_buf *dmabuf, struct sg_table *sgt); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 398 | |
Rob Clark | 2638d90 | 2014-11-08 09:13:37 -0500 | [diff] [blame] | 399 | int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id); |
| 400 | void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id); |
| 401 | uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 402 | struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); |
| 403 | const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); |
| 404 | struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, |
Ville Syrjälä | 1eb8345 | 2015-11-11 19:11:29 +0200 | [diff] [blame] | 405 | const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 406 | struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, |
Ville Syrjälä | 1eb8345 | 2015-11-11 19:11:29 +0200 | [diff] [blame] | 407 | struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 408 | |
| 409 | struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); |
Archit Taneja | 1aaa57f | 2016-02-25 11:19:45 +0530 | [diff] [blame] | 410 | void msm_fbdev_free(struct drm_device *dev); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 411 | |
Rob Clark | dada25b | 2013-12-01 12:12:54 -0500 | [diff] [blame] | 412 | struct hdmi; |
Arnd Bergmann | fcda50c | 2016-02-22 22:08:35 +0100 | [diff] [blame] | 413 | int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, |
Rob Clark | 067fef3 | 2014-11-04 13:33:14 -0500 | [diff] [blame] | 414 | struct drm_encoder *encoder); |
Arnd Bergmann | fcda50c | 2016-02-22 22:08:35 +0100 | [diff] [blame] | 415 | void __init msm_hdmi_register(void); |
| 416 | void __exit msm_hdmi_unregister(void); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 417 | |
Hai Li | 0045398 | 2014-12-12 14:41:17 -0500 | [diff] [blame] | 418 | struct msm_edp; |
| 419 | void __init msm_edp_register(void); |
| 420 | void __exit msm_edp_unregister(void); |
| 421 | int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, |
| 422 | struct drm_encoder *encoder); |
| 423 | |
struct msm_dsi;
/* Index of the encoder a DSI bridge attaches to, one per operating mode. */
enum msm_dsi_encoder_id {
	MSM_DSI_VIDEO_ENCODER_ID = 0,
	MSM_DSI_CMD_ENCODER_ID = 1,
	MSM_DSI_ENCODER_NUM = 2
};
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
#else
/* No-op stubs when DSI support is compiled out. */
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
{
	/* DSI disabled: signal to callers that no DSI output exists */
	return -EINVAL;
}
#endif
| 449 | |
Archit Taneja | 1dd0a0b | 2016-05-30 16:36:50 +0530 | [diff] [blame] | 450 | void __init msm_mdp_register(void); |
| 451 | void __exit msm_mdp_unregister(void); |
| 452 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 453 | #ifdef CONFIG_DEBUG_FS |
| 454 | void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); |
| 455 | void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); |
| 456 | void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 457 | int msm_debugfs_late_init(struct drm_device *dev); |
| 458 | int msm_rd_debugfs_init(struct drm_minor *minor); |
| 459 | void msm_rd_debugfs_cleanup(struct drm_minor *minor); |
| 460 | void msm_rd_dump_submit(struct msm_gem_submit *submit); |
Rob Clark | 70c70f0 | 2014-05-30 14:49:43 -0400 | [diff] [blame] | 461 | int msm_perf_debugfs_init(struct drm_minor *minor); |
| 462 | void msm_perf_debugfs_cleanup(struct drm_minor *minor); |
Rob Clark | a7d3c95 | 2014-05-30 14:47:38 -0400 | [diff] [blame] | 463 | #else |
/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 466 | #endif |
| 467 | |
| 468 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
| 469 | const char *dbgname); |
| 470 | void msm_writel(u32 data, void __iomem *addr); |
| 471 | u32 msm_readl(const void __iomem *addr); |
| 472 | |
/* Debug logging helper: always logs through DRM_DEBUG. */
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
/* Verbose logging helper: compiled out, but arguments remain
 * type-checked.  Wrapped in do/while(0) so the internal "if (0)"
 * cannot capture a following "else" when the macro is used unbraced
 * (dangling-else hazard of the previous "if (0) ..." expansion).
 */
#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
| 475 | |
/**
 * align_pitch - compute the byte pitch of one scanout buffer row
 * @width: buffer width in pixels
 * @bpp: bits per pixel
 *
 * Adreno requires the pitch to be aligned to 32 pixels, so the width
 * is rounded up to the next multiple of 32 before converting to bytes.
 */
static inline int align_pitch(int width, int bpp)
{
	/* round bits-per-pixel up to whole bytes */
	int bytes_per_pixel = (bpp + 7) / 8;
	/* adreno needs pitch aligned to 32 pixels: */
	int aligned_width = (width + 31) & ~31;

	return bytes_per_pixel * aligned_width;
}
| 482 | |
/* for the generated headers: stubs that trap if ever reached at
 * runtime (GCC statement expressions; BUG() then yield 0):
 */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x) ({BUG(); 0;})
#define util_float_to_half(x) ({BUG(); 0;})


/* extract a register field using the generated name__MASK/name__SHIFT */
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
| 493 | |
Rob Clark | 340ff41 | 2016-03-16 14:57:22 -0400 | [diff] [blame] | 494 | static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) |
| 495 | { |
| 496 | ktime_t now = ktime_get(); |
| 497 | unsigned long remaining_jiffies; |
| 498 | |
| 499 | if (ktime_compare(*timeout, now) < 0) { |
| 500 | remaining_jiffies = 0; |
| 501 | } else { |
| 502 | ktime_t rem = ktime_sub(*timeout, now); |
| 503 | struct timespec ts = ktime_to_timespec(rem); |
| 504 | remaining_jiffies = timespec_to_jiffies(&ts); |
| 505 | } |
| 506 | |
| 507 | return remaining_jiffies; |
| 508 | } |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 509 | |
| 510 | #endif /* __MSM_DRV_H__ */ |