/*
 * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <uapi/drm/sde_drm.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "sde_kms.h"
#include "sde_hw_lm.h"
#include "sde_hw_ctl.h"
#include "sde_crtc.h"
#include "sde_plane.h"
#include "sde_color_processing.h"
#include "sde_encoder.h"
#include "sde_connector.h"
#include "sde_power_handle.h"
#include "sde_core_perf.h"

struct sde_crtc_irq_info {
	struct sde_irq_callback irq;
	u32 event;
	int (*func)(struct drm_crtc *crtc, bool en,
			struct sde_irq_callback *irq);
	struct list_head list;
};

struct sde_crtc_custom_events {
	u32 event;
	int (*func)(struct drm_crtc *crtc, bool en,
			struct sde_irq_callback *irq);
};

static struct sde_crtc_custom_events custom_events[] = {
	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt}
};

/* default input fence timeout, in ms */
#define SDE_CRTC_INPUT_FENCE_TIMEOUT    2000

/*
 * The default input fence timeout is 2 seconds while max allowed
 * range is 10 seconds. Any value above 10 seconds adds glitches beyond
 * tolerance limit.
 */
#define SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT 10000

/* layer mixer index on sde_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

#define MISR_BUFF_SIZE 256
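
/**
 * _sde_crtc_get_kms - retrieve the kms handle associated with a crtc
 * @crtc: Pointer to drm crtc structure
 * return: Pointer to sde kms on success; NULL on invalid input
 */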
static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		SDE_ERROR("invalid crtc\n");
		return NULL;
	}
	priv = crtc->dev->dev_private;
	if (!priv || !priv->kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	return to_sde_kms(priv->kms);
}
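
/**
 * _sde_crtc_power_enable - enable or disable the power resource for this crtc
 * @sde_crtc: Pointer to sde crtc structure
 * @enable: True to enable the power resource; false to disable
 * return: 0 on success; error code otherwise
 */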
static inline int _sde_crtc_power_enable(struct sde_crtc *sde_crtc, bool enable)
{
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!sde_crtc) {
		SDE_ERROR("invalid sde crtc\n");
		return -EINVAL;
	}

	crtc = &sde_crtc->base;
	if (!crtc->dev || !crtc->dev->dev_private) {
		SDE_ERROR("invalid drm device\n");
		return -EINVAL;
	}

	priv = crtc->dev->dev_private;
	if (!priv->kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(priv->kms);

	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
			enable);
}

/**
 * _sde_crtc_rp_to_crtc - get crtc from resource pool object
 * @rp: Pointer to resource pool
 * return: Pointer to drm crtc if success; null otherwise
 */
static struct drm_crtc *_sde_crtc_rp_to_crtc(struct sde_crtc_respool *rp)
{
	if (!rp)
		return NULL;

	return container_of(rp, struct sde_crtc_state, rp)->base.crtc;
}

/**
 * _sde_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
 * @rp: Pointer to resource pool
 * @force: True to reclaim all resources; otherwise, reclaim only unused ones
 * return: None
 */
static void _sde_crtc_rp_reclaim(struct sde_crtc_respool *rp, bool force)
{
	struct sde_crtc_res *res, *next;
	struct drm_crtc *crtc;

	crtc = _sde_crtc_rp_to_crtc(rp);
	if (!crtc) {
		SDE_ERROR("invalid crtc\n");
		return;
	}

	SDE_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
			force ? "destroy" : "free_unused");

	list_for_each_entry_safe(res, next, &rp->res_list, list) {
		if (!force && !(res->flags & SDE_CRTC_RES_FLAG_FREE))
			continue;
		SDE_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		list_del(&res->list);
		if (res->ops.put)
			res->ops.put(res->val);
		kfree(res);
	}
}

/**
 * _sde_crtc_rp_free_unused - free unused resource in pool
 * @rp: Pointer to resource pool
 * return: none
 */
static void _sde_crtc_rp_free_unused(struct sde_crtc_respool *rp)
{
	_sde_crtc_rp_reclaim(rp, false);
}

/**
 * _sde_crtc_rp_destroy - destroy resource pool
 * @rp: Pointer to resource pool
 * return: None
 */
static void _sde_crtc_rp_destroy(struct sde_crtc_respool *rp)
{
	_sde_crtc_rp_reclaim(rp, true);
}

/**
 * _sde_crtc_hw_blk_get - get callback for hardware block
 * @val: Resource handle
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: Resource handle
 */
static void *_sde_crtc_hw_blk_get(void *val, u32 type, u64 tag)
{
	SDE_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
	return sde_hw_blk_get(val, type, tag);
}

/**
 * _sde_crtc_hw_blk_put - put callback for hardware block
 * @val: Resource handle
 * return: None
 */
static void _sde_crtc_hw_blk_put(void *val)
{
	SDE_DEBUG("res://%pK\n", val);
	sde_hw_blk_put(val);
}

/**
 * _sde_crtc_rp_duplicate - duplicate resource pool and reset reference count
 * @rp: Pointer to original resource pool
 * @dup_rp: Pointer to duplicated resource pool
 * return: None
 */
static void _sde_crtc_rp_duplicate(struct sde_crtc_respool *rp,
		struct sde_crtc_respool *dup_rp)
{
	struct sde_crtc_res *res, *dup_res;
	struct drm_crtc *crtc;

	if (!rp || !dup_rp) {
		SDE_ERROR("invalid resource pool\n");
		return;
	}

	crtc = _sde_crtc_rp_to_crtc(rp);
	if (!crtc) {
		SDE_ERROR("invalid crtc\n");
		return;
	}

	SDE_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);

	dup_rp->sequence_id = rp->sequence_id + 1;
	INIT_LIST_HEAD(&dup_rp->res_list);
	dup_rp->ops = rp->ops;
	list_for_each_entry(res, &rp->res_list, list) {
		dup_res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
		if (!dup_res)
			return;
		INIT_LIST_HEAD(&dup_res->list);
		atomic_set(&dup_res->refcount, 0);
		dup_res->type = res->type;
		dup_res->tag = res->tag;
		dup_res->val = res->val;
		dup_res->ops = res->ops;
		dup_res->flags = SDE_CRTC_RES_FLAG_FREE;
		SDE_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, dup_rp->sequence_id,
				dup_res->type, dup_res->tag, dup_res->val,
				atomic_read(&dup_res->refcount));
		list_add_tail(&dup_res->list, &dup_rp->res_list);
		if (dup_res->ops.get)
			dup_res->ops.get(dup_res->val, 0, -1);
	}
}

/**
 * _sde_crtc_rp_reset - reset resource pool after allocation
 * @rp: Pointer to original resource pool
 * return: None
 */
static void _sde_crtc_rp_reset(struct sde_crtc_respool *rp)
{
	if (!rp) {
		SDE_ERROR("invalid resource pool\n");
		return;
	}

	rp->sequence_id = 0;
	INIT_LIST_HEAD(&rp->res_list);
	rp->ops.get = _sde_crtc_hw_blk_get;
	rp->ops.put = _sde_crtc_hw_blk_put;
}

/**
 * _sde_crtc_rp_add - add given resource to resource pool
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * @val: Resource handle
 * @ops: Resource callback operations
 * return: 0 if success; error code otherwise
 */
static int _sde_crtc_rp_add(struct sde_crtc_respool *rp, u32 type, u64 tag,
		void *val, struct sde_crtc_res_ops *ops)
{
	struct sde_crtc_res *res;
	struct drm_crtc *crtc;

	if (!rp || !ops) {
		SDE_ERROR("invalid resource pool/ops\n");
		return -EINVAL;
	}

	crtc = _sde_crtc_rp_to_crtc(rp);
	if (!crtc) {
		SDE_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	list_for_each_entry(res, &rp->res_list, list) {
		if (res->type != type || res->tag != tag)
			continue;
		SDE_ERROR("crtc%d.%u already exist res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		return -EEXIST;
	}
	res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	INIT_LIST_HEAD(&res->list);
	atomic_set(&res->refcount, 1);
	res->type = type;
	res->tag = tag;
	res->val = val;
	res->ops = *ops;
	list_add_tail(&res->list, &rp->res_list);
	SDE_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
			crtc->base.id, rp->sequence_id, type, tag);
	return 0;
}

/**
 * _sde_crtc_rp_get - lookup the resource from given resource pool and obtain
 *	if available; otherwise, obtain resource from global pool
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: Resource handle if success; pointer error or null otherwise
 */
static void *_sde_crtc_rp_get(struct sde_crtc_respool *rp, u32 type, u64 tag)
{
	struct sde_crtc_res *res;
	void *val = NULL;
	int rc;
	struct drm_crtc *crtc;

	if (!rp) {
		SDE_ERROR("invalid resource pool\n");
		return NULL;
	}

	crtc = _sde_crtc_rp_to_crtc(rp);
	if (!crtc) {
		SDE_ERROR("invalid crtc\n");
		return NULL;
	}

	list_for_each_entry(res, &rp->res_list, list) {
		if (res->type != type || res->tag != tag)
			continue;
		SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		atomic_inc(&res->refcount);
		res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
		return res->val;
	}
	list_for_each_entry(res, &rp->res_list, list) {
		if (res->type != type || !(res->flags & SDE_CRTC_RES_FLAG_FREE))
			continue;
		SDE_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		atomic_inc(&res->refcount);
		res->tag = tag;
		res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
		return res->val;
	}
	if (rp->ops.get)
		val = rp->ops.get(NULL, type, -1);
	if (IS_ERR_OR_NULL(val)) {
		SDE_ERROR("crtc%d.%u failed to get res:0x%x//\n",
				crtc->base.id, rp->sequence_id, type);
		return NULL;
	}
	rc = _sde_crtc_rp_add(rp, type, tag, val, &rp->ops);
	if (rc) {
		SDE_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
				crtc->base.id, rp->sequence_id, type, tag);
		if (rp->ops.put)
			rp->ops.put(val);
		val = NULL;
	}
	return val;
}

/**
 * _sde_crtc_rp_put - return given resource to resource pool
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: None
 */
static void _sde_crtc_rp_put(struct sde_crtc_respool *rp, u32 type, u64 tag)
{
	struct sde_crtc_res *res, *next;
	struct drm_crtc *crtc;

	if (!rp) {
		SDE_ERROR("invalid resource pool\n");
		return;
	}

	crtc = _sde_crtc_rp_to_crtc(rp);
	if (!crtc) {
		SDE_ERROR("invalid crtc\n");
		return;
	}

	list_for_each_entry_safe(res, next, &rp->res_list, list) {
		if (res->type != type || res->tag != tag)
			continue;
		SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		if (res->flags & SDE_CRTC_RES_FLAG_FREE)
			SDE_ERROR(
				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
					crtc->base.id, rp->sequence_id,
					res->type, res->tag, res->val,
					atomic_read(&res->refcount));
		else if (atomic_dec_return(&res->refcount) == 0)
			res->flags |= SDE_CRTC_RES_FLAG_FREE;

		return;
	}
	SDE_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
			crtc->base.id, rp->sequence_id, type, tag);
}
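
/**
 * sde_crtc_res_add - add a resource to the crtc state's resource pool
 * @state: Pointer to drm crtc state
 * @type: Resource type
 * @tag: Search tag for given resource
 * @val: Resource handle
 * @ops: Resource callback operations
 * return: 0 if success; error code otherwise
 */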
int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
		void *val, struct sde_crtc_res_ops *ops)
{
	struct sde_crtc_respool *rp;

	if (!state) {
		SDE_ERROR("invalid parameters\n");
		return -EINVAL;
	}

	rp = &to_sde_crtc_state(state)->rp;
	return _sde_crtc_rp_add(rp, type, tag, val, ops);
}

void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
{
	struct sde_crtc_respool *rp;
	void *val;

	if (!state) {
		SDE_ERROR("invalid parameters\n");
		return NULL;
	}

	rp = &to_sde_crtc_state(state)->rp;
	val = _sde_crtc_rp_get(rp, type, tag);
	if (IS_ERR(val)) {
		SDE_ERROR("failed to get res type:0x%x:0x%llx\n",
				type, tag);
		return NULL;
	}

	return val;
}

void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
{
	struct sde_crtc_respool *rp;

	if (!state) {
		SDE_ERROR("invalid parameters\n");
		return;
	}

	rp = &to_sde_crtc_state(state)->rp;
	_sde_crtc_rp_put(rp, type, tag);
}
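
/**
 * _sde_crtc_deinit_events - flush and stop the crtc event worker thread
 * @sde_crtc: Pointer to sde crtc structure
 */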
static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
{
	if (!sde_crtc)
		return;

	if (sde_crtc->event_thread) {
		kthread_flush_worker(&sde_crtc->event_worker);
		kthread_stop(sde_crtc->event_thread);
		sde_crtc->event_thread = NULL;
	}
}

static void sde_crtc_destroy(struct drm_crtc *crtc)
{
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);

	SDE_DEBUG("\n");

	if (!crtc)
		return;

	if (sde_crtc->blob_info)
		drm_property_unreference_blob(sde_crtc->blob_info);
	msm_property_destroy(&sde_crtc->property_info);
	sde_cp_crtc_destroy_properties(crtc);

	sde_fence_deinit(&sde_crtc->output_fence);
	_sde_crtc_deinit_events(sde_crtc);

	drm_crtc_cleanup(crtc);
	mutex_destroy(&sde_crtc->crtc_lock);
	kfree(sde_crtc);
}

static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	SDE_DEBUG("\n");

	if (msm_is_mode_seamless(adjusted_mode) &&
		(!crtc->enabled || crtc->state->active_changed)) {
		SDE_ERROR("crtc state prevents seamless transition\n");
		return false;
	}

	return true;
}
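
/**
 * _sde_crtc_setup_blend_cfg - program the layer mixer blend config for a plane
 * @mixer: Pointer to crtc mixer structure
 * @pstate: Pointer to sde plane state of the staged plane
 * @format: Pointer to sde format of the plane's framebuffer
 */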
static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
	struct sde_plane_state *pstate, struct sde_format *format)
{
	uint32_t blend_op, fg_alpha, bg_alpha;
	uint32_t blend_type;
	struct sde_hw_mixer *lm = mixer->hw_lm;

	/* default to opaque blending */
	fg_alpha = sde_plane_get_property(pstate, PLANE_PROP_ALPHA);
	bg_alpha = 0xFF - fg_alpha;
	blend_op = SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
	blend_type = sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP);

	SDE_DEBUG("blend type:0x%x blend alpha:0x%x\n", blend_type, fg_alpha);

	switch (blend_type) {

	case SDE_DRM_BLEND_OP_OPAQUE:
		blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
			SDE_BLEND_BG_ALPHA_BG_CONST;
		break;

	case SDE_DRM_BLEND_OP_PREMULTIPLIED:
		if (format->alpha_enable) {
			blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
				SDE_BLEND_BG_ALPHA_FG_PIXEL;
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |= SDE_BLEND_BG_MOD_ALPHA |
					SDE_BLEND_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= SDE_BLEND_BG_INV_ALPHA;
			}
		}
		break;

	case SDE_DRM_BLEND_OP_COVERAGE:
		if (format->alpha_enable) {
			blend_op = SDE_BLEND_FG_ALPHA_FG_PIXEL |
				SDE_BLEND_BG_ALPHA_FG_PIXEL;
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |= SDE_BLEND_FG_MOD_ALPHA |
					SDE_BLEND_FG_INV_MOD_ALPHA |
					SDE_BLEND_BG_MOD_ALPHA |
					SDE_BLEND_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= SDE_BLEND_BG_INV_ALPHA;
			}
		}
		break;
	default:
		/* do nothing */
		break;
	}

	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
			bg_alpha, blend_op);
	SDE_DEBUG(
		"format: %4.4s, alpha_enable %u fg alpha:0x%x bg alpha:0x%x blend_op:0x%x\n",
		(char *) &format->base.pixel_format,
		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
}
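
/**
 * _sde_crtc_setup_dim_layer_cfg - split a dim layer across the crtc's layer
 *	mixers and program each intersecting portion
 * @crtc: Pointer to drm crtc structure
 * @sde_crtc: Pointer to sde crtc structure
 * @mixer: Pointer to array of crtc mixers
 * @dim_layer: Pointer to dim layer configuration from crtc state
 */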
static void _sde_crtc_setup_dim_layer_cfg(struct drm_crtc *crtc,
		struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer,
		struct sde_hw_dim_layer *dim_layer)
{
	struct sde_crtc_state *cstate;
	struct sde_hw_mixer *lm;
	struct sde_hw_dim_layer split_dim_layer;
	int i;

	if (!dim_layer->rect.w || !dim_layer->rect.h) {
		SDE_DEBUG("empty dim layer\n");
		return;
	}

	cstate = to_sde_crtc_state(crtc->state);

	split_dim_layer.stage = dim_layer->stage;
	split_dim_layer.color_fill = dim_layer->color_fill;

	/*
	 * traverse through the layer mixers attached to crtc and find the
	 * intersecting dim layer rect in each LM and program accordingly.
	 */
	for (i = 0; i < sde_crtc->num_mixers; i++) {
		split_dim_layer.flags = dim_layer->flags;

		sde_kms_rect_intersect(&cstate->lm_bounds[i], &dim_layer->rect,
					&split_dim_layer.rect);
		if (sde_kms_rect_is_null(&split_dim_layer.rect)) {
			/*
			 * no extra programming required for non-intersecting
			 * layer mixers with INCLUSIVE dim layer
			 */
			if (split_dim_layer.flags & SDE_DRM_DIM_LAYER_INCLUSIVE)
				continue;

			/*
			 * program the other non-intersecting layer mixers with
			 * INCLUSIVE dim layer of full size for uniformity
			 * with EXCLUSIVE dim layer config.
			 */
			split_dim_layer.flags &= ~SDE_DRM_DIM_LAYER_EXCLUSIVE;
			split_dim_layer.flags |= SDE_DRM_DIM_LAYER_INCLUSIVE;
			memcpy(&split_dim_layer.rect, &cstate->lm_bounds[i],
					sizeof(split_dim_layer.rect));

		} else {
			split_dim_layer.rect.x =
					split_dim_layer.rect.x -
						cstate->lm_bounds[i].w;
		}

		lm = mixer[i].hw_lm;
		mixer[i].mixer_op_mode |= 1 << split_dim_layer.stage;
		lm->ops.setup_dim_layer(lm, &split_dim_layer);
	}
}
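
/**
 * sde_crtc_get_crtc_roi - return pointer to the crtc's region of interest
 * @state: Pointer to drm crtc state
 * @crtc_roi: Output pointer to the aggregated crtc roi
 */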
void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
		const struct sde_rect **crtc_roi)
{
	struct sde_crtc_state *crtc_state;

	if (!state || !crtc_roi)
		return;

	crtc_state = to_sde_crtc_state(state);
	*crtc_roi = &crtc_state->crtc_roi;
}
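
/**
 * _sde_crtc_set_roi_v1 - copy sde_drm_roi_v1 rectangles from userspace into
 *	the crtc state's user roi list
 * @state: Pointer to drm crtc state
 * @usr_ptr: User pointer to struct sde_drm_roi_v1; NULL clears the rois
 * return: 0 if success; error code otherwise
 */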
static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
		void *usr_ptr)
{
	struct drm_crtc *crtc;
	struct sde_crtc_state *cstate;
	struct sde_drm_roi_v1 roi_v1;
	int i;

	if (!state) {
		SDE_ERROR("invalid args\n");
		return -EINVAL;
	}

	cstate = to_sde_crtc_state(state);
	crtc = cstate->base.crtc;

	memset(&cstate->user_roi_list, 0, sizeof(cstate->user_roi_list));

	if (!usr_ptr) {
		SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
		return 0;
	}

	if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
		SDE_ERROR("crtc%d: failed to copy roi_v1 data\n", DRMID(crtc));
		return -EINVAL;
	}

	SDE_DEBUG("crtc%d: num_rects %d\n", DRMID(crtc), roi_v1.num_rects);

	if (roi_v1.num_rects == 0) {
		SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
		return 0;
	}

	if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
		SDE_ERROR("crtc%d: too many rects specified: %d\n", DRMID(crtc),
				roi_v1.num_rects);
		return -EINVAL;
	}

	cstate->user_roi_list.num_rects = roi_v1.num_rects;
	for (i = 0; i < roi_v1.num_rects; ++i) {
		cstate->user_roi_list.roi[i] = roi_v1.roi[i];
		SDE_DEBUG("crtc%d: roi%d: roi (%d,%d) (%d,%d)\n",
				DRMID(crtc), i,
				cstate->user_roi_list.roi[i].x1,
				cstate->user_roi_list.roi[i].y1,
				cstate->user_roi_list.roi[i].x2,
				cstate->user_roi_list.roi[i].y2);
	}

	return 0;
}
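
/**
 * _sde_crtc_set_crtc_roi - aggregate the user/connector rois into a single
 *	bounding rectangle for the crtc
 * @crtc: Pointer to drm crtc structure
 * @state: Pointer to drm crtc state
 * return: 0 if success; error code otherwise
 */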
static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *crtc_state;
	struct sde_rect *crtc_roi;
	struct drm_clip_rect crtc_clip, *user_rect;
	int i, num_attached_conns = 0;

	if (!crtc || !state)
		return -EINVAL;

	sde_crtc = to_sde_crtc(crtc);
	crtc_state = to_sde_crtc_state(state);
	crtc_roi = &crtc_state->crtc_roi;

	/* init to invalid range maxes */
	crtc_clip.x1 = ~0;
	crtc_clip.y1 = ~0;
	crtc_clip.x2 = 0;
	crtc_clip.y2 = 0;

	for_each_connector_in_state(state->state, conn, conn_state, i) {
		struct sde_connector_state *sde_conn_state;

		if (!conn_state || conn_state->crtc != crtc)
			continue;

		if (num_attached_conns) {
			SDE_ERROR(
				"crtc%d: unsupported: roi on crtc w/ >1 connectors\n",
					DRMID(crtc));
			return -EINVAL;
		}
		++num_attached_conns;

		sde_conn_state = to_sde_connector_state(conn_state);

		if (memcmp(&sde_conn_state->rois, &crtc_state->user_roi_list,
				sizeof(crtc_state->user_roi_list))) {
			SDE_ERROR("%s: crtc -> conn roi scaling unsupported\n",
					sde_crtc->name);
			return -EINVAL;
		}
	}

	/* aggregate all clipping rectangles together for overall crtc roi */
	for (i = 0; i < crtc_state->user_roi_list.num_rects; i++) {
		user_rect = &crtc_state->user_roi_list.roi[i];

		crtc_clip.x1 = min(crtc_clip.x1, user_rect->x1);
		crtc_clip.y1 = min(crtc_clip.y1, user_rect->y1);
		crtc_clip.x2 = max(crtc_clip.x2, user_rect->x2);
		crtc_clip.y2 = max(crtc_clip.y2, user_rect->y2);

		SDE_DEBUG(
			"%s: conn%d roi%d (%d,%d),(%d,%d) -> crtc (%d,%d),(%d,%d)\n",
				sde_crtc->name, DRMID(crtc), i,
				user_rect->x1, user_rect->y1,
				user_rect->x2, user_rect->y2,
				crtc_clip.x1, crtc_clip.y1,
				crtc_clip.x2, crtc_clip.y2);

	}

	if (crtc_clip.x2 && crtc_clip.y2) {
		crtc_roi->x = crtc_clip.x1;
		crtc_roi->y = crtc_clip.y1;
		crtc_roi->w = crtc_clip.x2 - crtc_clip.x1;
		crtc_roi->h = crtc_clip.y2 - crtc_clip.y1;
	} else {
		crtc_roi->x = 0;
		crtc_roi->y = 0;
		crtc_roi->w = 0;
		crtc_roi->h = 0;
	}

	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);

	return 0;
}
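
/**
 * _sde_crtc_set_lm_roi - compute the roi for one layer mixer by intersecting
 *	the crtc roi with that mixer's bounds
 * @crtc: Pointer to drm crtc structure
 * @state: Pointer to drm crtc state
 * @lm_idx: Index of the layer mixer within the crtc
 * return: 0 if success; error code otherwise
 */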
static int _sde_crtc_set_lm_roi(struct drm_crtc *crtc,
		struct drm_crtc_state *state, int lm_idx)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *crtc_state;
	const struct sde_rect *crtc_roi;
	const struct sde_rect *lm_bounds;
	struct sde_rect *lm_roi;

	if (!crtc || !state || lm_idx >= ARRAY_SIZE(crtc_state->lm_bounds))
		return -EINVAL;

	sde_crtc = to_sde_crtc(crtc);
	crtc_state = to_sde_crtc_state(state);
	crtc_roi = &crtc_state->crtc_roi;
	lm_bounds = &crtc_state->lm_bounds[lm_idx];
	lm_roi = &crtc_state->lm_roi[lm_idx];

	if (!sde_kms_rect_is_null(crtc_roi)) {
		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
		if (sde_kms_rect_is_null(lm_roi)) {
			SDE_ERROR("unsupported R/L only partial update\n");
			return -EINVAL;
		}
	} else {
		memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
	}

	SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);

	return 0;
}

static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *crtc_state;
	const struct sde_rect *roi_prv, *roi_cur;
	int lm_idx;

	if (!crtc || !state)
		return -EINVAL;

	/*
	 * On certain HW, ROIs must be centered on the split between LMs,
	 * and be of equal width.
	 */

	sde_crtc = to_sde_crtc(crtc);
	crtc_state = to_sde_crtc_state(state);

	roi_prv = &crtc_state->lm_roi[0];
	for (lm_idx = 1; lm_idx < sde_crtc->num_mixers; lm_idx++) {
		roi_cur = &crtc_state->lm_roi[lm_idx];

		/* check lm rois are equal width & first roi ends at 2nd roi */
		if (((roi_prv->x + roi_prv->w) != roi_cur->x) ||
				(roi_prv->w != roi_cur->w)) {
			SDE_ERROR("%s: roi lm%d x %d w %d lm%d x %d w %d\n",
					sde_crtc->name,
					lm_idx-1, roi_prv->x, roi_prv->w,
					lm_idx, roi_cur->x, roi_cur->w);
			return -EINVAL;
		}
		roi_prv = roi_cur;
	}

	return 0;
}

static int _sde_crtc_check_planes_within_crtc_roi(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *crtc_state;
	const struct sde_rect *crtc_roi;
	struct drm_plane_state *pstate;
	struct drm_plane *plane;

	if (!crtc || !state)
		return -EINVAL;

	/*
	 * Reject commit if a Plane CRTC destination coordinates fall outside
	 * the partial CRTC ROI. LM output is determined via connector ROIs,
	 * if they are specified, not Plane CRTC ROIs.
	 */

	sde_crtc = to_sde_crtc(crtc);
	crtc_state = to_sde_crtc_state(state);
	crtc_roi = &crtc_state->crtc_roi;

	if (sde_kms_rect_is_null(crtc_roi))
		return 0;

	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct sde_rect plane_roi, intersection;

		pstate = drm_atomic_get_plane_state(state->state, plane);
		if (IS_ERR_OR_NULL(pstate)) {
			int rc = PTR_ERR(pstate);

			SDE_ERROR("%s: failed to get plane%d state, %d\n",
					sde_crtc->name, plane->base.id, rc);
			return rc;
		}

		plane_roi.x = pstate->crtc_x;
		plane_roi.y = pstate->crtc_y;
		plane_roi.w = pstate->crtc_w;
		plane_roi.h = pstate->crtc_h;
		sde_kms_rect_intersect(crtc_roi, &plane_roi, &intersection);
		if (!sde_kms_rect_is_equal(&plane_roi, &intersection)) {
			SDE_ERROR(
				"%s: plane%d crtc roi (%d,%d,%d,%d) outside crtc roi (%d,%d,%d,%d)\n",
					sde_crtc->name, plane->base.id,
					plane_roi.x, plane_roi.y,
					plane_roi.w, plane_roi.h,
					crtc_roi->x, crtc_roi->y,
					crtc_roi->w, crtc_roi->h);
			return -E2BIG;
		}
	}

	return 0;
}
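
/**
 * _sde_crtc_check_rois - atomic check of all roi restrictions: derive the
 *	crtc and per-mixer rois, then validate symmetry and plane coverage
 * @crtc: Pointer to drm crtc structure
 * @state: Pointer to drm crtc state
 * return: 0 if success; error code otherwise
 */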
static int _sde_crtc_check_rois(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct sde_crtc *sde_crtc;
	int lm_idx;
	int rc;

	if (!crtc || !state)
		return -EINVAL;

	sde_crtc = to_sde_crtc(crtc);

	rc = _sde_crtc_set_crtc_roi(crtc, state);
	if (rc)
		return rc;

	for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
		rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
		if (rc)
			return rc;
	}

	rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
	if (rc)
		return rc;

	rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
	if (rc)
		return rc;

	return 0;
}

static void _sde_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *crtc_state;
	const struct sde_rect *lm_roi;
	struct sde_hw_mixer *hw_lm;
	int lm_idx, lm_horiz_position;

	if (!crtc)
		return;

	sde_crtc = to_sde_crtc(crtc);
	crtc_state = to_sde_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
		struct sde_hw_mixer_cfg cfg;

		lm_roi = &crtc_state->lm_roi[lm_idx];
		hw_lm = sde_crtc->mixers[lm_idx].hw_lm;

		SDE_EVT32(DRMID(crtc_state->base.crtc), lm_idx,
			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);

		if (sde_kms_rect_is_null(lm_roi))
			continue;

		cfg.out_width = lm_roi->w;
		cfg.out_height = lm_roi->h;
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}
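
/**
 * _sde_crtc_blend_setup_mixer - stage planes on the crtc's layer mixers and
 *	set up blend, dim layer and sbuf configuration
 * @crtc: Pointer to drm crtc structure
 * @sde_crtc: Pointer to sde crtc structure
 * @mixer: Pointer to array of crtc mixers
 */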
static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct sde_crtc_state *cstate;
	struct sde_plane_state *pstate = NULL;
	struct sde_format *format;
	struct sde_hw_ctl *ctl;
	struct sde_hw_mixer *lm;
	struct sde_hw_stage_cfg *stage_cfg;
	struct sde_rect plane_crtc_roi;

	u32 flush_mask = 0;
	uint32_t lm_idx = LEFT_MIXER, stage_idx;
	bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
	int zpos_cnt[CRTC_DUAL_MIXERS][SDE_STAGE_MAX + 1] = { {0} };
	int i;
	bool sbuf_mode = false;
	u32 prefill = 0;

	if (!sde_crtc || !mixer) {
		SDE_ERROR("invalid sde_crtc or mixer\n");
		return;
	}

	ctl = mixer->hw_ctl;
	lm = mixer->hw_lm;
	stage_cfg = &sde_crtc->stage_cfg;
	cstate = to_sde_crtc_state(crtc->state);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		plane_crtc_roi.x = state->crtc_x;
		plane_crtc_roi.y = state->crtc_y;
		plane_crtc_roi.w = state->crtc_w;
		plane_crtc_roi.h = state->crtc_h;

		pstate = to_sde_plane_state(state);
		fb = state->fb;

		if (sde_plane_is_sbuf_mode(plane, &prefill))
			sbuf_mode = true;

		sde_plane_get_ctl_flush(plane, ctl, &flush_mask);


		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				sde_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));

		SDE_EVT32(DRMID(crtc), DRMID(plane),
				state->fb ? state->fb->base.id : -1,
				state->src_x >> 16, state->src_y >> 16,
				state->src_w >> 16, state->src_h >> 16,
				state->crtc_x, state->crtc_y,
				state->crtc_w, state->crtc_h);

		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
			struct sde_rect intersect;

			/* skip if the roi doesn't fall within LM's bounds */
			sde_kms_rect_intersect(&plane_crtc_roi,
					&cstate->lm_bounds[lm_idx],
					&intersect);
			if (sde_kms_rect_is_null(&intersect))
				continue;

			stage_idx = zpos_cnt[lm_idx][pstate->stage]++;
			stage_cfg->stage[lm_idx][pstate->stage][stage_idx] =
					sde_plane_pipe(plane);
			stage_cfg->multirect_index
					[lm_idx][pstate->stage][stage_idx] =
					pstate->multirect_index;

			mixer[lm_idx].flush_mask |= flush_mask;


			SDE_EVT32(DRMID(plane), DRMID(crtc), lm_idx, stage_idx,
					pstate->stage, pstate->multirect_index,
					pstate->multirect_mode,
					format->base.pixel_format,
					fb ? fb->modifier[0] : 0);

			/* blend config update */
			if (pstate->stage != SDE_STAGE_BASE) {
				_sde_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

				if (bg_alpha_enable[lm_idx] &&
						!format->alpha_enable)
					mixer[lm_idx].mixer_op_mode = 0;
				else
					mixer[lm_idx].mixer_op_mode |=
							1 << pstate->stage;
			} else if (format->alpha_enable) {
				bg_alpha_enable[lm_idx] = true;
			}
		}
	}

	if (lm && lm->ops.setup_dim_layer) {
		cstate = to_sde_crtc_state(crtc->state);
		for (i = 0; i < cstate->num_dim_layers; i++)
			_sde_crtc_setup_dim_layer_cfg(crtc, sde_crtc,
					mixer, &cstate->dim_layer[i]);
	}

	if (ctl->ops.setup_sbuf_cfg) {
		cstate = to_sde_crtc_state(crtc->state);
		if (!sbuf_mode) {
			cstate->sbuf_cfg.rot_op_mode =
					SDE_CTL_ROT_OP_MODE_OFFLINE;
			cstate->sbuf_prefill_line = 0;
		} else {
			cstate->sbuf_cfg.rot_op_mode =
					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
			cstate->sbuf_prefill_line = prefill;
		}

		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
	}

	_sde_crtc_program_lm_output_roi(crtc);
}

/**
 * _sde_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
	struct sde_crtc_mixer *mixer = sde_crtc->mixers;
	struct sde_hw_ctl *ctl;
	struct sde_hw_mixer *lm;

	int i;

	SDE_DEBUG("%s\n", sde_crtc->name);

	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
		SDE_ERROR("invalid number mixers: %d\n", sde_crtc->num_mixers);
		return;
	}

	for (i = 0; i < sde_crtc->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
			SDE_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
			mixer[i].hw_ctl->ops.clear_all_blendstages(
					mixer[i].hw_ctl);

		/* clear dim_layer settings */
		lm = mixer[i].hw_lm;
		if (lm->ops.clear_dim_layer)
			lm->ops.clear_dim_layer(lm);
	}

	/* initialize stage cfg */
	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));

	_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);

	for (i = 0; i < sde_crtc->num_mixers; i++) {
		ctl = mixer[i].hw_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&sde_crtc->stage_cfg, i);
	}

	_sde_crtc_program_lm_output_roi(crtc);
}

void sde_crtc_prepare_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	struct drm_connector *conn;

	if (!crtc || !crtc->state) {
		SDE_ERROR("invalid crtc\n");
		return;
	}

	sde_crtc = to_sde_crtc(crtc);
	cstate = to_sde_crtc_state(crtc->state);
	SDE_EVT32_VERBOSE(DRMID(crtc));

	/* identify connectors attached to this crtc */
	cstate->num_connectors = 0;

	drm_for_each_connector(conn, crtc->dev)
		if (conn->state && conn->state->crtc == crtc &&
				cstate->num_connectors < MAX_CONNECTORS) {
			cstate->connectors[cstate->num_connectors++] = conn;
			sde_connector_prepare_fence(conn);
		}

	/* prepare main output fence */
	sde_fence_prepare(&sde_crtc->output_fence);
}

/**
 * _sde_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the sde_crtc->event.
 * if file!=NULL, this is preclose potential cancel-flip path
 * @crtc: Pointer to drm crtc structure
 * @file: Pointer to drm file
 */
static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
		struct drm_file *file)
{
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = sde_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			sde_crtc->event = NULL;
			DRM_DEBUG_VBL("%s: send event: %pK\n",
						sde_crtc->name, event);
			SDE_EVT32(DRMID(crtc));
			drm_crtc_send_vblank_event(crtc, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		SDE_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	drm_for_each_encoder(encoder, crtc->dev)
		if (encoder->crtc == crtc)
			return sde_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

static void sde_crtc_vblank_cb(void *data)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0)))
		sde_crtc->vblank_cb_time = ktime_get();
	else
		sde_crtc->vblank_cb_count++;
	_sde_crtc_complete_flip(crtc, NULL);
	drm_crtc_handle_vblank(crtc);
	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
	SDE_EVT32_VERBOSE(DRMID(crtc));
}
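
/**
 * sde_crtc_frame_event_work - deferred handler for frame done events; releases
 *	bandwidth on the last pending frame and recycles the event structure
 * @work: Pointer to the kthread work item embedded in the frame event
 */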
static void sde_crtc_frame_event_work(struct kthread_work *work)
{
	struct msm_drm_private *priv;
	struct sde_crtc_frame_event *fevent;
	struct drm_crtc *crtc;
	struct sde_crtc *sde_crtc;
	struct sde_kms *sde_kms;
	unsigned long flags;

	if (!work) {
		SDE_ERROR("invalid work handle\n");
		return;
	}

	fevent = container_of(work, struct sde_crtc_frame_event, work);
	if (!fevent->crtc) {
		SDE_ERROR("invalid crtc\n");
		return;
	}

	crtc = fevent->crtc;
	sde_crtc = to_sde_crtc(crtc);

	sde_kms = _sde_crtc_get_kms(crtc);
	if (!sde_kms) {
		SDE_ERROR("invalid kms handle\n");
		return;
	}
	priv = sde_kms->dev->dev_private;

	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
			(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
			(fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&sde_crtc->frame_pending) < 1) {
			/* this should not happen */
			SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
					crtc->base.id,
					ktime_to_ns(fevent->ts),
					atomic_read(&sde_crtc->frame_pending));
			SDE_EVT32(DRMID(crtc), fevent->event,
							SDE_EVTLOG_FUNC_CASE1);

			/* don't propagate unexpected frame done events */
			return;
		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			SDE_DEBUG("crtc%d ts:%lld last pending\n",
					crtc->base.id,
					ktime_to_ns(fevent->ts));
			SDE_EVT32(DRMID(crtc), fevent->event,
							SDE_EVTLOG_FUNC_CASE2);
			sde_core_perf_crtc_release_bw(crtc);
		} else {
			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
							SDE_EVTLOG_FUNC_CASE3);
		}
	} else {
		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
				ktime_to_ns(fevent->ts),
				fevent->event);
		SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE4);
	}

	if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
		SDE_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
}

static void sde_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct sde_crtc *sde_crtc;
	struct msm_drm_private *priv;
	struct sde_crtc_frame_event *fevent;
	unsigned long flags;
	int pipe_id;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		SDE_ERROR("invalid parameters\n");
		return;
	}
	sde_crtc = to_sde_crtc(crtc);
	priv = crtc->dev->dev_private;
	pipe_id = drm_crtc_index(crtc);

	SDE_DEBUG("crtc%d\n", crtc->base.id);
	SDE_EVT32_VERBOSE(DRMID(crtc));

	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
			struct sde_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);

	if (!fevent) {
		SDE_ERROR("crtc%d event %d overflow\n",
				crtc->base.id, event);
		SDE_EVT32(DRMID(crtc), event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
}
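
/**
 * sde_crtc_complete_commit - signal output fences and notify connectors that
 *	the atomic commit has completed
 * @crtc: Pointer to drm crtc structure
 * @old_state: Pointer to previous drm crtc state
 */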
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001425void sde_crtc_complete_commit(struct drm_crtc *crtc,
1426 struct drm_crtc_state *old_state)
Clarence Ip24f80662016-06-13 19:05:32 -04001427{
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001428 struct sde_crtc *sde_crtc;
1429 struct sde_crtc_state *cstate;
1430 int i;
1431
1432 if (!crtc || !crtc->state) {
Clarence Ip24f80662016-06-13 19:05:32 -04001433 SDE_ERROR("invalid crtc\n");
1434 return;
1435 }
1436
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001437 sde_crtc = to_sde_crtc(crtc);
1438 cstate = to_sde_crtc_state(crtc->state);
Dhaval Patel6c666622017-03-21 23:02:59 -07001439 SDE_EVT32_VERBOSE(DRMID(crtc));
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001440
1441 /* signal output fence(s) at end of commit */
1442 sde_fence_signal(&sde_crtc->output_fence, 0);
1443
1444 for (i = 0; i < cstate->num_connectors; ++i)
1445 sde_connector_complete_commit(cstate->connectors[i]);
Clarence Ip24f80662016-06-13 19:05:32 -04001446}
1447
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001448/**
Clarence Ipcae1bb62016-07-07 12:07:13 -04001449 * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
1450 * @cstate: Pointer to sde crtc state
1451 */
1452static void _sde_crtc_set_input_fence_timeout(struct sde_crtc_state *cstate)
1453{
1454 if (!cstate) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001455 SDE_ERROR("invalid cstate\n");
Clarence Ipcae1bb62016-07-07 12:07:13 -04001456 return;
1457 }
1458 cstate->input_fence_timeout_ns =
1459 sde_crtc_get_property(cstate, CRTC_PROP_INPUT_FENCE_TIMEOUT);
1460 cstate->input_fence_timeout_ns *= NSEC_PER_MSEC;
1461}
1462
1463/**
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08001464 * _sde_crtc_set_dim_layer_v1 - copy dim layer settings from userspace
1465 * @cstate: Pointer to sde crtc state
1466 * @user_ptr: User ptr for sde_drm_dim_layer_v1 struct
1467 */
1468static void _sde_crtc_set_dim_layer_v1(struct sde_crtc_state *cstate,
1469 void *usr_ptr)
1470{
1471 struct sde_drm_dim_layer_v1 dim_layer_v1;
1472 struct sde_drm_dim_layer_cfg *user_cfg;
1473 u32 count, i;
1474
1475 if (!cstate) {
1476 SDE_ERROR("invalid cstate\n");
1477 return;
1478 }
1479
1480 if (!usr_ptr) {
1481 SDE_DEBUG("dim layer data removed\n");
1482 return;
1483 }
1484
1485 if (copy_from_user(&dim_layer_v1, usr_ptr, sizeof(dim_layer_v1))) {
1486 SDE_ERROR("failed to copy dim layer data\n");
1487 return;
1488 }
1489
1490 count = dim_layer_v1.num_layers;
1491 if (!count || (count > SDE_MAX_DIM_LAYERS)) {
1492 SDE_ERROR("invalid number of Dim Layers:%d", count);
1493 return;
1494 }
1495
1496 /* populate from user space */
1497 cstate->num_dim_layers = count;
1498 for (i = 0; i < count; i++) {
1499 user_cfg = &dim_layer_v1.layer_cfg[i];
1500 cstate->dim_layer[i].flags = user_cfg->flags;
1501 cstate->dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
1502
1503 cstate->dim_layer[i].rect.x = user_cfg->rect.x1;
1504 cstate->dim_layer[i].rect.y = user_cfg->rect.y1;
1505 cstate->dim_layer[i].rect.w = user_cfg->rect.x2 -
1506 user_cfg->rect.x1 + 1;
1507 cstate->dim_layer[i].rect.h = user_cfg->rect.y2 -
1508 user_cfg->rect.y1 + 1;
1509
1510 cstate->dim_layer[i].color_fill = (struct sde_mdss_color) {
1511 user_cfg->color_fill.color_0,
1512 user_cfg->color_fill.color_1,
1513 user_cfg->color_fill.color_2,
1514 user_cfg->color_fill.color_3,
1515 };
1516 }
1517}
1518
1519/**
Clarence Ipcae1bb62016-07-07 12:07:13 -04001520 * _sde_crtc_wait_for_fences - wait for incoming framebuffer sync fences
1521 * @crtc: Pointer to CRTC object
1522 */
1523static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
1524{
1525 struct drm_plane *plane = NULL;
1526 uint32_t wait_ms = 1;
Clarence Ip8dedc232016-09-09 16:41:00 -04001527 ktime_t kt_end, kt_wait;
Dhaval Patel39323d42017-03-01 23:48:24 -08001528 int rc = 0;
Clarence Ipcae1bb62016-07-07 12:07:13 -04001529
Lloyd Atkinson4f1c8692016-09-14 14:04:25 -04001530 SDE_DEBUG("\n");
Clarence Ipcae1bb62016-07-07 12:07:13 -04001531
1532 if (!crtc || !crtc->state) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001533 SDE_ERROR("invalid crtc/state %pK\n", crtc);
Clarence Ipcae1bb62016-07-07 12:07:13 -04001534 return;
1535 }
1536
1537 /* use monotonic timer to limit total fence wait time */
Clarence Ip8dedc232016-09-09 16:41:00 -04001538 kt_end = ktime_add_ns(ktime_get(),
1539 to_sde_crtc_state(crtc->state)->input_fence_timeout_ns);
Clarence Ipcae1bb62016-07-07 12:07:13 -04001540
1541 /*
1542 * Wait for fences sequentially, as all of them need to be signalled
1543 * before we can proceed.
1544 *
1545 * Limit total wait time to INPUT_FENCE_TIMEOUT, but still call
1546 * sde_plane_wait_input_fence with wait_ms == 0 after the timeout so
1547 * that each plane can check its fence status and react appropriately
Dhaval Patel39323d42017-03-01 23:48:24 -08001548 * if its fence has timed out. Call input fence wait multiple times if
1549 * fence wait is interrupted due to interrupt call.
Clarence Ipcae1bb62016-07-07 12:07:13 -04001550 */
1551 drm_atomic_crtc_for_each_plane(plane, crtc) {
Dhaval Patel39323d42017-03-01 23:48:24 -08001552 do {
Clarence Ip8dedc232016-09-09 16:41:00 -04001553 kt_wait = ktime_sub(kt_end, ktime_get());
1554 if (ktime_compare(kt_wait, ktime_set(0, 0)) >= 0)
1555 wait_ms = ktime_to_ms(kt_wait);
Clarence Ipcae1bb62016-07-07 12:07:13 -04001556 else
1557 wait_ms = 0;
Dhaval Patel39323d42017-03-01 23:48:24 -08001558
1559 rc = sde_plane_wait_input_fence(plane, wait_ms);
1560 } while (wait_ms && rc == -ERESTARTSYS);
Clarence Ipcae1bb62016-07-07 12:07:13 -04001561 }
1562}
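
/*
 * Minimal sketch of the deadline/retry pattern used above, stripped of
 * SDE specifics. It relies only on the standard ktime helpers and
 * assumes a wait_fn() that, like sde_plane_wait_input_fence(), returns
 * -ERESTARTSYS when interrupted by a signal.
 *
 *	ktime_t end = ktime_add_ns(ktime_get(), timeout_ns);
 *	ktime_t left;
 *	u32 wait_ms;
 *	int rc;
 *
 *	do {
 *		left = ktime_sub(end, ktime_get());
 *		wait_ms = ktime_compare(left, ktime_set(0, 0)) >= 0 ?
 *				ktime_to_ms(left) : 0;
 *		rc = wait_fn(wait_ms);
 *	} while (wait_ms && rc == -ERESTARTSYS);
 *
 * Once the deadline passes, wait_ms drops to zero so each remaining
 * plane still gets one non-blocking status check before kickoff.
 */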
1563
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001564static void _sde_crtc_setup_mixer_for_encoder(
1565 struct drm_crtc *crtc,
1566 struct drm_encoder *enc)
1567{
1568 struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
Lloyd Atkinson4f1c8692016-09-14 14:04:25 -04001569 struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001570 struct sde_rm *rm = &sde_kms->rm;
1571 struct sde_crtc_mixer *mixer;
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001572 struct sde_hw_ctl *last_valid_ctl = NULL;
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001573 int i;
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07001574 struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter;
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001575
1576 sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
1577 sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07001578 sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001579
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001580 /* Set up all the mixers and ctls reserved by this encoder */
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001581 for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
1582 mixer = &sde_crtc->mixers[i];
1583
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001584 if (!sde_rm_get_hw(rm, &lm_iter))
1585 break;
1586 mixer->hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
1587
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001588 /* There may be fewer CTLs than LMs; if so, one CTL drives several LMs */
1589 if (!sde_rm_get_hw(rm, &ctl_iter)) {
1590 SDE_DEBUG("no ctl assigned to lm %d, using previous\n",
Clarence Ip8e69ad02016-12-09 09:43:57 -05001591 mixer->hw_lm->idx - LM_0);
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001592 mixer->hw_ctl = last_valid_ctl;
1593 } else {
1594 mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw;
1595 last_valid_ctl = mixer->hw_ctl;
1596 }
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001597
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001598 /* Shouldn't happen, mixers are always >= ctls */
1599 if (!mixer->hw_ctl) {
1600 SDE_ERROR("no valid ctls found for lm %d\n",
Clarence Ip8e69ad02016-12-09 09:43:57 -05001601 mixer->hw_lm->idx - LM_0);
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001602 return;
1603 }
1604
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07001605 /* Dspp may be null */
1606 (void) sde_rm_get_hw(rm, &dspp_iter);
1607 mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
1608
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001609 mixer->encoder = enc;
1610
1611 sde_crtc->num_mixers++;
Clarence Ipd9f9fa62016-09-09 13:42:32 -04001612 SDE_DEBUG("setup mixer %d: lm %d\n",
1613 i, mixer->hw_lm->idx - LM_0);
1614 SDE_DEBUG("setup mixer %d: ctl %d\n",
1615 i, mixer->hw_ctl->idx - CTL_0);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001616 }
1617}
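
/*
 * Worked example of the LM/CTL pairing above (illustrative reservation):
 * if the resource manager handed this encoder LM_0 and LM_1 but only
 * CTL_0, the loop produces
 *
 *	mixers[0]: hw_lm = LM_0, hw_ctl = CTL_0
 *	mixers[1]: hw_lm = LM_1, hw_ctl = CTL_0	  (reuses last_valid_ctl)
 *
 * so both layer mixers are flushed through the same control path.
 */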
1618
1619static void _sde_crtc_setup_mixers(struct drm_crtc *crtc)
1620{
1621 struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
1622 struct drm_encoder *enc;
1623
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001624 sde_crtc->num_mixers = 0;
1625 memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
1626
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07001627 mutex_lock(&sde_crtc->crtc_lock);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001628 /* Check for mixers on all encoders attached to this crtc */
1629 list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
1630 if (enc->crtc != crtc)
1631 continue;
1632
1633 _sde_crtc_setup_mixer_for_encoder(crtc, enc);
1634 }
Lloyd Atkinsona9d7e752017-01-17 16:31:43 -05001635
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07001636 mutex_unlock(&sde_crtc->crtc_lock);
Lloyd Atkinson11f34442016-08-11 11:19:52 -04001637}
1638
Lloyd Atkinsona9d7e752017-01-17 16:31:43 -05001639static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
1640 struct drm_crtc_state *state)
1641{
1642 struct sde_crtc *sde_crtc;
1643 struct sde_crtc_state *cstate;
1644 struct drm_display_mode *adj_mode;
1645 u32 crtc_split_width;
1646 int i;
1647
1648 if (!crtc || !state) {
1649 SDE_ERROR("invalid args\n");
1650 return;
1651 }
1652
1653 sde_crtc = to_sde_crtc(crtc);
1654 cstate = to_sde_crtc_state(state);
1655
1656 adj_mode = &state->adjusted_mode;
1657 crtc_split_width = sde_crtc_mixer_width(sde_crtc, adj_mode);
1658
1659 for (i = 0; i < sde_crtc->num_mixers; i++) {
Lloyd Atkinson8ba47032017-03-22 17:13:32 -04001660 cstate->lm_bounds[i].x = crtc_split_width * i;
1661 cstate->lm_bounds[i].y = 0;
1662 cstate->lm_bounds[i].w = crtc_split_width;
1663 cstate->lm_bounds[i].h = adj_mode->vdisplay;
1664 memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
1665 sizeof(cstate->lm_roi[i]));
1666 SDE_EVT32(DRMID(crtc), i,
1667 cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
1668 cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
1669 SDE_DEBUG("%s: lm%d bnd&roi (%d,%d,%d,%d)\n", sde_crtc->name, i,
1670 cstate->lm_roi[i].x, cstate->lm_roi[i].y,
1671 cstate->lm_roi[i].w, cstate->lm_roi[i].h);
Lloyd Atkinsona9d7e752017-01-17 16:31:43 -05001672 }
1673
1674 drm_mode_debug_printmodeline(adj_mode);
1675}
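
/*
 * Worked example of the bounds math above (illustrative mode): for a
 * 2160x1080 adjusted mode split across two mixers,
 * sde_crtc_mixer_width() returns 1080 and the loop yields
 *
 *	lm_bounds[0] = { .x = 0,    .y = 0, .w = 1080, .h = 1080 }
 *	lm_bounds[1] = { .x = 1080, .y = 0, .w = 1080, .h = 1080 }
 *
 * with each lm_roi[] starting out as a copy of its full bounds.
 */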
1676
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001677static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
Clarence Ip0d0e96d2016-10-24 18:13:13 -04001678 struct drm_crtc_state *old_state)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001679{
Clarence Ipcae1bb62016-07-07 12:07:13 -04001680 struct sde_crtc *sde_crtc;
1681 struct drm_device *dev;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001682 unsigned long flags;
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001683 u32 i;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001684
Clarence Ipcae1bb62016-07-07 12:07:13 -04001685 if (!crtc) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001686 SDE_ERROR("invalid crtc\n");
Clarence Ipcae1bb62016-07-07 12:07:13 -04001687 return;
1688 }
1689
Alan Kwong163d2612016-11-03 00:56:56 -04001690 if (!crtc->state->enable) {
1691 SDE_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
1692 crtc->base.id, crtc->state->enable);
1693 return;
1694 }
1695
1696 SDE_DEBUG("crtc%d\n", crtc->base.id);
1697
Clarence Ipcae1bb62016-07-07 12:07:13 -04001698 sde_crtc = to_sde_crtc(crtc);
1699 dev = crtc->dev;
1700
Lloyd Atkinson8ba47032017-03-22 17:13:32 -04001701 if (!sde_crtc->num_mixers) {
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001702 _sde_crtc_setup_mixers(crtc);
Lloyd Atkinson8ba47032017-03-22 17:13:32 -04001703 _sde_crtc_setup_lm_bounds(crtc, crtc->state);
1704 }
Lloyd Atkinsona9d7e752017-01-17 16:31:43 -05001705
Lloyd Atkinson265d2212016-05-30 13:12:01 -04001706 if (sde_crtc->event) {
1707 WARN_ON(sde_crtc->event);
1708 } else {
1709 spin_lock_irqsave(&dev->event_lock, flags);
1710 sde_crtc->event = crtc->state->event;
1711 spin_unlock_irqrestore(&dev->event_lock, flags);
1712 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001713
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001714 /* Reset flush mask from previous commit */
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001715 for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001716 struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001717
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001718 if (ctl)
1719 ctl->ops.clear_pending_flush(ctl);
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001720 }
1721
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001722 /*
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001723 * If no mixers have been allocated in sde_crtc_atomic_check(),
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001724 * it means we are trying to flush a CRTC whose state is disabled:
1725 * nothing else needs to be done.
1726 */
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001727 if (unlikely(!sde_crtc->num_mixers))
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001728 return;
1729
Clarence Ipd9f9fa62016-09-09 13:42:32 -04001730 _sde_crtc_blend_setup(crtc);
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07001731 sde_cp_crtc_apply_properties(crtc);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001732
1733 /*
1734 * PP_DONE irq is only used by command mode for now.
1735 * It is better to request the pending interrupt before the FLUSH and
1736 * START triggers, so that no pp_done irq is missed.
1737 * This is safe because in command mode no pp_done can occur before
1738 * the SW trigger.
1739 */
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001740}
1741
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001742static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
1743 struct drm_crtc_state *old_crtc_state)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001744{
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08001745 struct drm_encoder *encoder;
Clarence Ipcae1bb62016-07-07 12:07:13 -04001746 struct sde_crtc *sde_crtc;
1747 struct drm_device *dev;
Lloyd Atkinson265d2212016-05-30 13:12:01 -04001748 struct drm_plane *plane;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001749 unsigned long flags;
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08001750 struct sde_crtc_state *cstate;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001751
Clarence Ipcae1bb62016-07-07 12:07:13 -04001752 if (!crtc) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001753 SDE_ERROR("invalid crtc\n");
Clarence Ipcae1bb62016-07-07 12:07:13 -04001754 return;
1755 }
1756
Alan Kwong163d2612016-11-03 00:56:56 -04001757 if (!crtc->state->enable) {
1758 SDE_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
1759 crtc->base.id, crtc->state->enable);
1760 return;
1761 }
1762
1763 SDE_DEBUG("crtc%d\n", crtc->base.id);
Clarence Ipcae1bb62016-07-07 12:07:13 -04001764
1765 sde_crtc = to_sde_crtc(crtc);
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08001766 cstate = to_sde_crtc_state(crtc->state);
Clarence Ipcae1bb62016-07-07 12:07:13 -04001767 dev = crtc->dev;
1768
Lloyd Atkinson265d2212016-05-30 13:12:01 -04001769 if (sde_crtc->event) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001770 SDE_DEBUG("already received sde_crtc->event\n");
Lloyd Atkinson265d2212016-05-30 13:12:01 -04001771 } else {
Lloyd Atkinson265d2212016-05-30 13:12:01 -04001772 spin_lock_irqsave(&dev->event_lock, flags);
1773 sde_crtc->event = crtc->state->event;
1774 spin_unlock_irqrestore(&dev->event_lock, flags);
1775 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001776
1777 /*
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001778 * If no mixers have been allocated in sde_crtc_atomic_check(),
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001779 * it means we are trying to flush a CRTC whose state is disabled:
1780 * nothing else needs to be done.
1781 */
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04001782 if (unlikely(!sde_crtc->num_mixers))
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04001783 return;
1784
Clarence Ipcae1bb62016-07-07 12:07:13 -04001785 /* wait for acquire fences before anything else is done */
1786 _sde_crtc_wait_for_fences(crtc);
1787
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08001788 if (!cstate->rsc_update) {
1789 drm_for_each_encoder(encoder, dev) {
1790 if (encoder->crtc != crtc)
1791 continue;
1792
1793 cstate->rsc_client =
1794 sde_encoder_update_rsc_client(encoder, true);
1795 }
1796 cstate->rsc_update = true;
1797 }
1798
Alan Kwong9aa061c2016-11-06 21:17:12 -05001799 /* update performance setting before crtc kickoff */
1800 sde_core_perf_crtc_update(crtc, 1, false);
1801
Clarence Ipcae1bb62016-07-07 12:07:13 -04001802 /*
1803 * Final plane updates: Give each plane a chance to complete all
1804 * required writes/flushing before crtc's "flush
1805 * everything" call below.
1806 */
1807 drm_atomic_crtc_for_each_plane(plane, crtc)
1808 sde_plane_flush(plane);
1809
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001810 /* Kickoff will be scheduled by outer layer */
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001811}
1812
Clarence Ip7a753bb2016-07-07 11:47:44 -04001813/**
1814 * sde_crtc_destroy_state - state destroy hook
1815 * @crtc: drm CRTC
1816 * @state: CRTC state object to release
1817 */
1818static void sde_crtc_destroy_state(struct drm_crtc *crtc,
1819 struct drm_crtc_state *state)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07001820{
Clarence Ip7a753bb2016-07-07 11:47:44 -04001821 struct sde_crtc *sde_crtc;
1822 struct sde_crtc_state *cstate;
1823
1824 if (!crtc || !state) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001825 SDE_ERROR("invalid argument(s)\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04001826 return;
1827 }
1828
1829 sde_crtc = to_sde_crtc(crtc);
1830 cstate = to_sde_crtc_state(state);
1831
Alan Kwong163d2612016-11-03 00:56:56 -04001832 SDE_DEBUG("crtc%d\n", crtc->base.id);
Clarence Ip7a753bb2016-07-07 11:47:44 -04001833
Alan Kwongcdb2f282017-03-18 13:42:06 -07001834 _sde_crtc_rp_destroy(&cstate->rp);
1835
Dhaval Patel04c7e8e2016-09-26 20:14:31 -07001836 __drm_atomic_helper_crtc_destroy_state(state);
Clarence Ip7a753bb2016-07-07 11:47:44 -04001837
1838 /* destroy value helper */
1839 msm_property_destroy_state(&sde_crtc->property_info, cstate,
1840 cstate->property_values, cstate->property_blobs);
1841}
1842
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001843void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
1844{
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001845 struct drm_encoder *encoder;
1846 struct drm_device *dev;
Alan Kwong628d19e2016-10-31 13:50:13 -04001847 struct sde_crtc *sde_crtc;
Alan Kwong67a3f792016-11-01 23:16:53 -04001848 struct msm_drm_private *priv;
1849 struct sde_kms *sde_kms;
Alan Kwong4aacd532017-02-04 18:51:33 -08001850 struct sde_crtc_state *cstate;
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001851
1852 if (!crtc) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07001853 SDE_ERROR("invalid argument\n");
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001854 return;
1855 }
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001856 dev = crtc->dev;
Alan Kwong628d19e2016-10-31 13:50:13 -04001857 sde_crtc = to_sde_crtc(crtc);
Alan Kwong67a3f792016-11-01 23:16:53 -04001858 sde_kms = _sde_crtc_get_kms(crtc);
1859 priv = sde_kms->dev->dev_private;
Alan Kwong4aacd532017-02-04 18:51:33 -08001860 cstate = to_sde_crtc_state(crtc->state);
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001861
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001862 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
Alan Kwong4aacd532017-02-04 18:51:33 -08001863 struct sde_encoder_kickoff_params params = { 0 };
1864
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001865 if (encoder->crtc != crtc)
1866 continue;
1867
1868 /*
1869 * Encoder will flush/start now, unless it has a tx pending.
1870 * If so, it may delay and flush at an irq event (e.g. ppdone)
1871 */
Alan Kwong4aacd532017-02-04 18:51:33 -08001872 params.inline_rotate_prefill = cstate->sbuf_prefill_line;
1873 sde_encoder_prepare_for_kickoff(encoder, &params);
Alan Kwong628d19e2016-10-31 13:50:13 -04001874 }
1875
1876 if (atomic_read(&sde_crtc->frame_pending) > 2) {
1877 /* framework allows only 1 outstanding + current */
1878 SDE_ERROR("crtc%d invalid frame pending\n",
1879 crtc->base.id);
1880 SDE_EVT32(DRMID(crtc), 0);
1881 return;
1882 } else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
1883 /* acquire bandwidth and other resources */
1884 SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
1885 SDE_EVT32(DRMID(crtc), 1);
1886 } else {
1887 SDE_DEBUG("crtc%d commit\n", crtc->base.id);
1888 SDE_EVT32(DRMID(crtc), 2);
1889 }
1890
1891 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1892 if (encoder->crtc != crtc)
1893 continue;
1894
1895 sde_encoder_kickoff(encoder);
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04001896 }
Lloyd Atkinson5d722782016-05-30 14:09:41 -04001897}
1898
Clarence Ip7a753bb2016-07-07 11:47:44 -04001899/**
Clarence Ip7f70ce42017-03-20 06:53:46 -07001900 * _sde_crtc_vblank_enable_nolock - update power resource and vblank request
1901 * @sde_crtc: Pointer to sde crtc structure
1902 * @enable: Whether to enable/disable vblanks
1903 */
1904static void _sde_crtc_vblank_enable_nolock(
1905 struct sde_crtc *sde_crtc, bool enable)
1906{
1907 struct drm_device *dev;
1908 struct drm_crtc *crtc;
1909 struct drm_encoder *enc;
Clarence Ip7f70ce42017-03-20 06:53:46 -07001910
1911 if (!sde_crtc) {
1912 SDE_ERROR("invalid crtc\n");
1913 return;
1914 }
1915
1916 crtc = &sde_crtc->base;
1917 dev = crtc->dev;
Clarence Ip7f70ce42017-03-20 06:53:46 -07001918
1919 if (enable) {
Dhaval Patelf9245d62017-03-28 16:24:00 -07001920 if (_sde_crtc_power_enable(sde_crtc, true))
1921 return;
1922
Clarence Ip7f70ce42017-03-20 06:53:46 -07001923 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
1924 if (enc->crtc != crtc)
1925 continue;
1926
1927 SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
1928
1929 sde_encoder_register_vblank_callback(enc,
1930 sde_crtc_vblank_cb, (void *)crtc);
1931 }
1932 } else {
1933 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
1934 if (enc->crtc != crtc)
1935 continue;
1936
1937 SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
1938
1939 sde_encoder_register_vblank_callback(enc, NULL, NULL);
1940 }
Dhaval Patelf9245d62017-03-28 16:24:00 -07001941 _sde_crtc_power_enable(sde_crtc, false);
Clarence Ip7f70ce42017-03-20 06:53:46 -07001942 }
1943}
1944
1945/**
1946 * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
1947 * @crtc: Pointer to drm crtc object
1948 * @enable: true to enable suspend, false to indicate resume
1949 */
1950static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
1951{
1952 struct sde_crtc *sde_crtc;
1953 struct msm_drm_private *priv;
1954 struct sde_kms *sde_kms;
1955
1956 if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
1957 SDE_ERROR("invalid crtc\n");
1958 return;
1959 }
1960 sde_crtc = to_sde_crtc(crtc);
1961 priv = crtc->dev->dev_private;
1962
1963 if (!priv->kms) {
1964 SDE_ERROR("invalid crtc kms\n");
1965 return;
1966 }
1967 sde_kms = to_sde_kms(priv->kms);
1968
1969 SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
1970
1971 mutex_lock(&sde_crtc->crtc_lock);
1972
1973 /*
Clarence Ip2f9beeb2017-03-16 11:04:53 -04001974 * Update CP on suspend/resume transitions
1975 */
1976 if (enable && !sde_crtc->suspend)
1977 sde_cp_crtc_suspend(crtc);
1978 else if (!enable && sde_crtc->suspend)
1979 sde_cp_crtc_resume(crtc);
1980
1981 /*
Clarence Ip7f70ce42017-03-20 06:53:46 -07001982 * If the vblank refcount != 0, release a power reference on suspend
1983 * and take it back during resume (if it is still != 0).
1984 */
1985 if (sde_crtc->suspend == enable)
1986 SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
1987 crtc->base.id, enable);
1988 else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
1989 _sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
1990
1991 sde_crtc->suspend = enable;
1992
1993 mutex_unlock(&sde_crtc->crtc_lock);
1994}
1995
1996/**
Clarence Ip7a753bb2016-07-07 11:47:44 -04001997 * sde_crtc_duplicate_state - state duplicate hook
1998 * @crtc: Pointer to drm crtc structure
1999 * @Returns: Pointer to new drm_crtc_state structure
2000 */
2001static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc)
2002{
2003 struct sde_crtc *sde_crtc;
2004 struct sde_crtc_state *cstate, *old_cstate;
2005
2006 if (!crtc || !crtc->state) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002007 SDE_ERROR("invalid argument(s)\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002008 return NULL;
2009 }
2010
2011 sde_crtc = to_sde_crtc(crtc);
2012 old_cstate = to_sde_crtc_state(crtc->state);
2013 cstate = msm_property_alloc_state(&sde_crtc->property_info);
2014 if (!cstate) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002015 SDE_ERROR("failed to allocate state\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002016 return NULL;
2017 }
2018
2019 /* duplicate value helper */
2020 msm_property_duplicate_state(&sde_crtc->property_info,
2021 old_cstate, cstate,
2022 cstate->property_values, cstate->property_blobs);
2023
2024 /* duplicate base helper */
2025 __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
2026
Alan Kwongcdb2f282017-03-18 13:42:06 -07002027 _sde_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
2028
Clarence Ip7a753bb2016-07-07 11:47:44 -04002029 return &cstate->base;
2030}
2031
2032/**
2033 * sde_crtc_reset - reset hook for CRTCs
2034 * Resets the atomic state for @crtc by freeing the state pointer (which might
2035 * be NULL, e.g. at driver load time) and allocating a new empty state object.
2036 * @crtc: Pointer to drm crtc structure
2037 */
2038static void sde_crtc_reset(struct drm_crtc *crtc)
2039{
2040 struct sde_crtc *sde_crtc;
2041 struct sde_crtc_state *cstate;
2042
2043 if (!crtc) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002044 SDE_ERROR("invalid crtc\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002045 return;
2046 }
2047
Clarence Ip7f70ce42017-03-20 06:53:46 -07002048 /* revert suspend actions, if necessary */
2049 if (msm_is_suspend_state(crtc->dev))
2050 _sde_crtc_set_suspend(crtc, false);
2051
Clarence Ip7a753bb2016-07-07 11:47:44 -04002052 /* remove previous state, if present */
2053 if (crtc->state) {
2054 sde_crtc_destroy_state(crtc, crtc->state);
2055 crtc->state = NULL;
2056 }
2057
2058 sde_crtc = to_sde_crtc(crtc);
2059 cstate = msm_property_alloc_state(&sde_crtc->property_info);
2060 if (!cstate) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002061 SDE_ERROR("failed to allocate state\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002062 return;
2063 }
2064
2065 /* reset value helper */
2066 msm_property_reset_state(&sde_crtc->property_info, cstate,
2067 cstate->property_values, cstate->property_blobs);
2068
Clarence Ipcae1bb62016-07-07 12:07:13 -04002069 _sde_crtc_set_input_fence_timeout(cstate);
2070
Alan Kwongcdb2f282017-03-18 13:42:06 -07002071 _sde_crtc_rp_reset(&cstate->rp);
2072
Clarence Ip7a753bb2016-07-07 11:47:44 -04002073 cstate->base.crtc = crtc;
2074 crtc->state = &cstate->base;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002075}
2076
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002077static void sde_crtc_disable(struct drm_crtc *crtc)
2078{
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04002079 struct sde_crtc *sde_crtc;
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08002080 struct sde_crtc_state *cstate;
Alan Kwong07da0982016-11-04 12:57:45 -04002081 struct drm_encoder *encoder;
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002082 unsigned long flags;
2083 struct sde_crtc_irq_info *node = NULL;
2084 int ret;
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04002085
Clarence Ip7f70ce42017-03-20 06:53:46 -07002086 if (!crtc || !crtc->dev || !crtc->state) {
Lloyd Atkinson4f1c8692016-09-14 14:04:25 -04002087 SDE_ERROR("invalid crtc\n");
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04002088 return;
2089 }
2090 sde_crtc = to_sde_crtc(crtc);
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08002091 cstate = to_sde_crtc_state(crtc->state);
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04002092
Alan Kwong163d2612016-11-03 00:56:56 -04002093 SDE_DEBUG("crtc%d\n", crtc->base.id);
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04002094
Clarence Ip7f70ce42017-03-20 06:53:46 -07002095 if (msm_is_suspend_state(crtc->dev))
2096 _sde_crtc_set_suspend(crtc, true);
2097
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002098 mutex_lock(&sde_crtc->crtc_lock);
Alan Kwong628d19e2016-10-31 13:50:13 -04002099 SDE_EVT32(DRMID(crtc));
2100
Clarence Ip7f70ce42017-03-20 06:53:46 -07002101 if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
Alan Kwong628d19e2016-10-31 13:50:13 -04002102 SDE_ERROR("crtc%d invalid vblank refcount\n",
2103 crtc->base.id);
Dhaval Patel6c666622017-03-21 23:02:59 -07002104 SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
2105 SDE_EVTLOG_FUNC_CASE1);
Alan Kwong07da0982016-11-04 12:57:45 -04002106 drm_for_each_encoder(encoder, crtc->dev) {
2107 if (encoder->crtc != crtc)
2108 continue;
2109 sde_encoder_register_vblank_callback(encoder, NULL,
2110 NULL);
2111 }
2112 atomic_set(&sde_crtc->vblank_refcount, 0);
2113 }
2114
Alan Kwong628d19e2016-10-31 13:50:13 -04002115 if (atomic_read(&sde_crtc->frame_pending)) {
2116 /* release bandwidth and other resources */
2117 SDE_ERROR("crtc%d invalid frame pending\n",
2118 crtc->base.id);
Dhaval Patel6c666622017-03-21 23:02:59 -07002119 SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
2120 SDE_EVTLOG_FUNC_CASE2);
Alan Kwong9aa061c2016-11-06 21:17:12 -05002121 sde_core_perf_crtc_release_bw(crtc);
Alan Kwong628d19e2016-10-31 13:50:13 -04002122 atomic_set(&sde_crtc->frame_pending, 0);
2123 }
2124
Alan Kwong9aa061c2016-11-06 21:17:12 -05002125 sde_core_perf_crtc_update(crtc, 0, true);
2126
Alan Kwong628d19e2016-10-31 13:50:13 -04002127 drm_for_each_encoder(encoder, crtc->dev) {
2128 if (encoder->crtc != crtc)
2129 continue;
2130 sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
Dhaval Patel82c8dbc2017-02-18 23:15:10 -08002131 sde_encoder_update_rsc_client(encoder, false);
2132 cstate->rsc_client = NULL;
2133 cstate->rsc_update = false;
Alan Kwong628d19e2016-10-31 13:50:13 -04002134 }
2135
Lloyd Atkinsonc44a52e2016-08-16 16:40:17 -04002136 memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
2137 sde_crtc->num_mixers = 0;
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002138
2139 spin_lock_irqsave(&sde_crtc->spin_lock, flags);
2140 list_for_each_entry(node, &sde_crtc->user_event_list, list) {
2141 ret = 0;
2142 if (node->func)
2143 ret = node->func(crtc, false, &node->irq);
2144 if (ret)
2145 SDE_ERROR("%s failed to disable event %x\n",
2146 sde_crtc->name, node->event);
2147 }
2148 spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
2149
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002150 mutex_unlock(&sde_crtc->crtc_lock);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002151}
2152
2153static void sde_crtc_enable(struct drm_crtc *crtc)
2154{
Clarence Ipcae1bb62016-07-07 12:07:13 -04002155 struct sde_crtc *sde_crtc;
Alan Kwong628d19e2016-10-31 13:50:13 -04002156 struct drm_encoder *encoder;
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002157 unsigned long flags;
2158 struct sde_crtc_irq_info *node = NULL;
Lloyd Atkinsona9d7e752017-01-17 16:31:43 -05002159 int ret;
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -04002160
Clarence Ipcae1bb62016-07-07 12:07:13 -04002161 if (!crtc) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002162 SDE_ERROR("invalid crtc\n");
Clarence Ipcae1bb62016-07-07 12:07:13 -04002163 return;
2164 }
2165
Alan Kwong163d2612016-11-03 00:56:56 -04002166 SDE_DEBUG("crtc%d\n", crtc->base.id);
Alan Kwong628d19e2016-10-31 13:50:13 -04002167 SDE_EVT32(DRMID(crtc));
Clarence Ipcae1bb62016-07-07 12:07:13 -04002168 sde_crtc = to_sde_crtc(crtc);
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -04002169
Alan Kwong628d19e2016-10-31 13:50:13 -04002170 drm_for_each_encoder(encoder, crtc->dev) {
2171 if (encoder->crtc != crtc)
2172 continue;
2173 sde_encoder_register_frame_event_callback(encoder,
2174 sde_crtc_frame_event_cb, (void *)crtc);
2175 }
2176
Gopikrishnaiah Anandande2c81b2017-03-15 12:41:29 -07002177 spin_lock_irqsave(&sde_crtc->spin_lock, flags);
2178 list_for_each_entry(node, &sde_crtc->user_event_list, list) {
2179 ret = 0;
2180 if (node->func)
2181 ret = node->func(crtc, true, &node->irq);
2182 if (ret)
2183 SDE_ERROR("%s failed to enable event %x\n",
2184 sde_crtc->name, node->event);
2185 }
2186 spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002187}
2188
2189struct plane_state {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002190 struct sde_plane_state *sde_pstate;
Dhaval Patel04c7e8e2016-09-26 20:14:31 -07002191 const struct drm_plane_state *drm_pstate;
Clarence Ipc47a0692016-10-11 10:54:17 -04002192 int stage;
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002193 u32 pipe_id;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002194};
2195
Clarence Ipc47a0692016-10-11 10:54:17 -04002196static int pstate_cmp(const void *a, const void *b)
2197{
2198 struct plane_state *pa = (struct plane_state *)a;
2199 struct plane_state *pb = (struct plane_state *)b;
2200 int rc = 0;
2201 int pa_zpos, pb_zpos;
2202
2203 pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
2204 pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
2205
2206 if (pa_zpos != pb_zpos)
2207 rc = pa_zpos - pb_zpos;
2208 else
2209 rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
2210
2211 return rc;
2212}
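
/*
 * Example of how the comparator is consumed by the sort() call in
 * sde_crtc_atomic_check() below (plane values are made up):
 *
 *	// input  zpos:   2    1    1
 *	// input  crtc_x: 0    960  0
 *	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 *	// output order: (zpos 1, x 0), (zpos 1, x 960), (zpos 2, x 0)
 *
 * Planes are ordered by zpos first and, within a shared zpos, by their
 * left edge so source-split pairs come out left-to-right.
 */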
2213
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002214static int sde_crtc_atomic_check(struct drm_crtc *crtc,
2215 struct drm_crtc_state *state)
2216{
Clarence Ipcae1bb62016-07-07 12:07:13 -04002217 struct sde_crtc *sde_crtc;
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002218 struct plane_state pstates[SDE_STAGE_MAX * 4];
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002219 struct sde_crtc_state *cstate;
Dhaval Patelec10fad2016-08-22 14:40:48 -07002220
Dhaval Patel04c7e8e2016-09-26 20:14:31 -07002221 const struct drm_plane_state *pstate;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002222 struct drm_plane *plane;
Dhaval Patelec10fad2016-08-22 14:40:48 -07002223 struct drm_display_mode *mode;
2224
2225 int cnt = 0, rc = 0, mixer_width, i, z_pos;
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002226
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002227 struct sde_multirect_plane_states multirect_plane[SDE_STAGE_MAX * 2];
2228 int multirect_count = 0;
2229 const struct drm_plane_state *pipe_staged[SSPP_MAX];
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002230 int left_zpos_cnt = 0, right_zpos_cnt = 0;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002231
Clarence Ipcae1bb62016-07-07 12:07:13 -04002232 if (!crtc) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002233 SDE_ERROR("invalid crtc\n");
Clarence Ipcae1bb62016-07-07 12:07:13 -04002234 return -EINVAL;
2235 }
2236
Alan Kwongcdb2f282017-03-18 13:42:06 -07002237 sde_crtc = to_sde_crtc(crtc);
2238 cstate = to_sde_crtc_state(state);
2239
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002240 if (!state->enable || !state->active) {
2241 SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
2242 crtc->base.id, state->enable, state->active);
Alan Kwongcdb2f282017-03-18 13:42:06 -07002243 goto end;
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002244 }
2245
Dhaval Patelec10fad2016-08-22 14:40:48 -07002246 mode = &state->adjusted_mode;
2247 SDE_DEBUG("%s: check\n", sde_crtc->name);
Clarence Ipcae1bb62016-07-07 12:07:13 -04002248
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002249 memset(pipe_staged, 0, sizeof(pipe_staged));
2250
Dhaval Patelec10fad2016-08-22 14:40:48 -07002251 mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002252
Lloyd Atkinsona9d7e752017-01-17 16:31:43 -05002253 _sde_crtc_setup_lm_bounds(crtc, state);
2254
Dhaval Patelec10fad2016-08-22 14:40:48 -07002255 /* get plane state for all drm planes associated with crtc state */
Dhaval Patel04c7e8e2016-09-26 20:14:31 -07002256 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
Clarence Ipc47a0692016-10-11 10:54:17 -04002257 if (IS_ERR_OR_NULL(pstate)) {
2258 rc = PTR_ERR(pstate);
2259 SDE_ERROR("%s: failed to get plane%d state, %d\n",
2260 sde_crtc->name, plane->base.id, rc);
Alan Kwong85767282016-10-03 18:03:37 -04002261 goto end;
2262 }
Clarence Ipc47a0692016-10-11 10:54:17 -04002263 if (cnt >= ARRAY_SIZE(pstates))
2264 continue;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002265
Dhaval Patelec10fad2016-08-22 14:40:48 -07002266 pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
2267 pstates[cnt].drm_pstate = pstate;
Clarence Ipc47a0692016-10-11 10:54:17 -04002268 pstates[cnt].stage = sde_plane_get_property(
2269 pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002270 pstates[cnt].pipe_id = sde_plane_pipe(plane);
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002271
2272 /* check dim layer stage with every plane */
2273 for (i = 0; i < cstate->num_dim_layers; i++) {
2274 if (pstates[cnt].stage == cstate->dim_layer[i].stage) {
2275 SDE_ERROR("plane%d/dimlayer in same stage:%d\n",
2276 plane->base.id,
2277 cstate->dim_layer[i].stage);
2278 rc = -EINVAL;
2279 goto end;
2280 }
2281 }
2282
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002283 if (pipe_staged[pstates[cnt].pipe_id]) {
2284 multirect_plane[multirect_count].r0 =
2285 pipe_staged[pstates[cnt].pipe_id];
2286 multirect_plane[multirect_count].r1 = pstate;
2287 multirect_count++;
2288
2289 pipe_staged[pstates[cnt].pipe_id] = NULL;
2290 } else {
2291 pipe_staged[pstates[cnt].pipe_id] = pstate;
2292 }
2293
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002294 cnt++;
Dhaval Patelec10fad2016-08-22 14:40:48 -07002295
2296 if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
2297 mode->vdisplay) ||
2298 CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w,
2299 mode->hdisplay)) {
2300 SDE_ERROR("invalid vertical/horizontal destination\n");
2301 SDE_ERROR("y:%d h:%d vdisp:%d x:%d w:%d hdisp:%d\n",
2302 pstate->crtc_y, pstate->crtc_h, mode->vdisplay,
2303 pstate->crtc_x, pstate->crtc_w, mode->hdisplay);
2304 rc = -E2BIG;
2305 goto end;
2306 }
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002307 }
2308
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002309 for (i = 1; i < SSPP_MAX; i++) {
2310 if (pipe_staged[i] &&
2311 is_sde_plane_virtual(pipe_staged[i]->plane)) {
2312 SDE_ERROR("invalid use of virtual plane: %d\n",
2313 pipe_staged[i]->plane->base.id);
2314 rc = -EINVAL;
2314 goto end;
2315 }
2316 }
2317
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002318 /* Check dim layer rect bounds and stage */
2319 for (i = 0; i < cstate->num_dim_layers; i++) {
2320 if ((CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.y,
2321 cstate->dim_layer[i].rect.h, mode->vdisplay)) ||
2322 (CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.x,
2323 cstate->dim_layer[i].rect.w, mode->hdisplay)) ||
2324 (cstate->dim_layer[i].stage >= SDE_STAGE_MAX) ||
2325 (!cstate->dim_layer[i].rect.w) ||
2326 (!cstate->dim_layer[i].rect.h)) {
2327 SDE_ERROR("invalid dim_layer:{%d,%d,%d,%d}, stage:%d\n",
2328 cstate->dim_layer[i].rect.x,
2329 cstate->dim_layer[i].rect.y,
2330 cstate->dim_layer[i].rect.w,
2331 cstate->dim_layer[i].rect.h,
2332 cstate->dim_layer[i].stage);
2333 SDE_ERROR("display: %dx%d\n", mode->hdisplay,
2334 mode->vdisplay);
2335 rc = -E2BIG;
2336 goto end;
2337 }
2338 }
2339
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002340 /* assign mixer stages based on sorted zpos property */
2341 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
2342
Clarence Ipc47a0692016-10-11 10:54:17 -04002343 if (!sde_is_custom_client()) {
2344 int stage_old = pstates[0].stage;
Dhaval Patelec10fad2016-08-22 14:40:48 -07002345
Clarence Ipc47a0692016-10-11 10:54:17 -04002346 z_pos = 0;
2347 for (i = 0; i < cnt; i++) {
2348 if (stage_old != pstates[i].stage)
2349 ++z_pos;
2350 stage_old = pstates[i].stage;
2351 pstates[i].stage = z_pos;
2352 }
2353 }
2354
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002355 z_pos = -1;
Clarence Ipc47a0692016-10-11 10:54:17 -04002356 for (i = 0; i < cnt; i++) {
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002357 /* reset counts at every new blend stage */
2358 if (pstates[i].stage != z_pos) {
2359 left_zpos_cnt = 0;
2360 right_zpos_cnt = 0;
2361 z_pos = pstates[i].stage;
2362 }
Clarence Ipc47a0692016-10-11 10:54:17 -04002363
2364 /* verify z_pos setting before using it */
Clarence Ip649989a2016-10-21 14:28:34 -04002365 if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) {
Clarence Ipc47a0692016-10-11 10:54:17 -04002366 SDE_ERROR("> %d plane stages assigned\n",
2367 SDE_STAGE_MAX - SDE_STAGE_0);
2368 rc = -EINVAL;
2369 goto end;
2370 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002371 if (left_zpos_cnt == 2) {
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002372 SDE_ERROR("> 2 planes @ stage %d on left\n",
Dhaval Patelec10fad2016-08-22 14:40:48 -07002373 z_pos);
2374 rc = -EINVAL;
2375 goto end;
2376 }
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002377 left_zpos_cnt++;
2378
Dhaval Patelec10fad2016-08-22 14:40:48 -07002379 } else {
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002380 if (right_zpos_cnt == 2) {
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002381 SDE_ERROR("> 2 planes @ stage %d on right\n",
Dhaval Patelec10fad2016-08-22 14:40:48 -07002382 z_pos);
2383 rc = -EINVAL;
2384 goto end;
2385 }
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002386 right_zpos_cnt++;
Dhaval Patelec10fad2016-08-22 14:40:48 -07002387 }
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002388
Clarence Ipc47a0692016-10-11 10:54:17 -04002389 pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0;
Dhaval Patelec10fad2016-08-22 14:40:48 -07002390 SDE_DEBUG("%s: zpos %d\n", sde_crtc->name, z_pos);
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002391 }
2392
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002393 for (i = 0; i < multirect_count; i++) {
2394 if (sde_plane_validate_multirect_v2(&multirect_plane[i])) {
2395 SDE_ERROR(
2396 "multirect validation failed for planes (%d - %d)\n",
2397 multirect_plane[i].r0->plane->base.id,
2398 multirect_plane[i].r1->plane->base.id);
2399 rc = -EINVAL;
Alan Kwong9aa061c2016-11-06 21:17:12 -05002400 goto end;
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002401 }
2402 }
2403
Alan Kwong9aa061c2016-11-06 21:17:12 -05002404 rc = sde_core_perf_crtc_check(crtc, state);
2405 if (rc) {
2406 SDE_ERROR("crtc%d failed performance check %d\n",
2407 crtc->base.id, rc);
2408 goto end;
2409 }
2410
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002411 /*
2412 * enforce pipe priority restrictions
2413 * use pstates sorted by stage to check planes on same stage
2414 * we assume that all pipes are in source split so it's valid to compare
2415 * without taking into account left/right mixer placement
2416 */
2417 for (i = 1; i < cnt; i++) {
2418 struct plane_state *prv_pstate, *cur_pstate;
2419 int32_t prv_x, cur_x, prv_id, cur_id;
2420
2421 prv_pstate = &pstates[i - 1];
2422 cur_pstate = &pstates[i];
2423 if (prv_pstate->stage != cur_pstate->stage)
2424 continue;
2425
2426 prv_x = prv_pstate->drm_pstate->crtc_x;
2427 cur_x = cur_pstate->drm_pstate->crtc_x;
2428 prv_id = prv_pstate->sde_pstate->base.plane->base.id;
2429 cur_id = cur_pstate->sde_pstate->base.plane->base.id;
2430
2431 /*
2432 * Planes are enumerated in pipe-priority order such that planes
2433 * with lower drm_id must be left-most in a shared blend-stage
2434 * when using source split.
2435 */
2436 if (cur_x > prv_x && cur_id < prv_id) {
2437 SDE_ERROR(
2438 "shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n",
2439 cur_pstate->stage, cur_id, cur_x,
2440 prv_id, prv_x);
2441 rc = -EINVAL;
2442 goto end;
2443 } else if (cur_x < prv_x && cur_id > prv_id) {
2444 SDE_ERROR(
2445 "shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n",
2446 cur_pstate->stage, prv_id, prv_x,
2447 cur_id, cur_x);
2448 rc = -EINVAL;
2449 goto end;
2450 }
2451 }
2452
Lloyd Atkinson8ba47032017-03-22 17:13:32 -04002453 rc = _sde_crtc_check_rois(crtc, state);
2454 if (rc) {
2455 SDE_ERROR("crtc%d failed roi check %d\n", crtc->base.id, rc);
2456 goto end;
2457 }
Lloyd Atkinson629ce1f2016-10-27 16:50:26 -04002458
Dhaval Patelec10fad2016-08-22 14:40:48 -07002459end:
Alan Kwongcdb2f282017-03-18 13:42:06 -07002460 _sde_crtc_rp_free_unused(&cstate->rp);
Dhaval Patelec10fad2016-08-22 14:40:48 -07002461 return rc;
Abhijit Kulkarni3e3e0d22016-06-24 17:56:13 -04002462}
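
/*
 * Worked example of the zpos normalization done above for non-custom
 * clients (sample values only): sorted input stages {1, 1, 4, 7} are
 * remapped to {0, 0, 1, 2}, so gaps in the requested zpos range
 * collapse while planes sharing a zpos keep sharing a blend stage.
 * Each normalized value is then offset by SDE_STAGE_0 before being
 * programmed into the mixer.
 */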
2463
Abhijit Kulkarni7acb3262016-07-05 15:27:25 -04002464int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002465{
Clarence Ip7f70ce42017-03-20 06:53:46 -07002466 struct sde_crtc *sde_crtc;
2467 int rc = 0;
Abhijit Kulkarni7acb3262016-07-05 15:27:25 -04002468
Clarence Ip7f70ce42017-03-20 06:53:46 -07002469 if (!crtc) {
2470 SDE_ERROR("invalid crtc\n");
2471 return -EINVAL;
2472 }
2473 sde_crtc = to_sde_crtc(crtc);
2474
2475 mutex_lock(&sde_crtc->crtc_lock);
Alan Kwong07da0982016-11-04 12:57:45 -04002476 if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
2477 SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
Clarence Ip7f70ce42017-03-20 06:53:46 -07002478 if (!sde_crtc->suspend)
2479 _sde_crtc_vblank_enable_nolock(sde_crtc, true);
Alan Kwong07da0982016-11-04 12:57:45 -04002480 } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
2481 SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
Clarence Ip7f70ce42017-03-20 06:53:46 -07002482 rc = -EINVAL;
Alan Kwong07da0982016-11-04 12:57:45 -04002483 } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
2484 SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
Clarence Ip7f70ce42017-03-20 06:53:46 -07002485 if (!sde_crtc->suspend)
2486 _sde_crtc_vblank_enable_nolock(sde_crtc, false);
Alan Kwong07da0982016-11-04 12:57:45 -04002487 } else {
2488 SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
2489 crtc->base.id,
2490 en ? "enable" : "disable",
2491 atomic_read(&sde_crtc->vblank_refcount));
Alan Kwong07da0982016-11-04 12:57:45 -04002492 }
Lloyd Atkinsone5c2c0b2016-07-05 12:23:29 -04002493
Clarence Ip7f70ce42017-03-20 06:53:46 -07002494 mutex_unlock(&sde_crtc->crtc_lock);
2495 return rc;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002496}
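
/*
 * Example of the refcounted enable/disable behaviour (call sequence is
 * hypothetical): with vblank_refcount starting at 0,
 *
 *	sde_crtc_vblank(crtc, true);	// 0 -> 1, callbacks registered
 *	sde_crtc_vblank(crtc, true);	// 1 -> 2, no hardware change
 *	sde_crtc_vblank(crtc, false);	// 2 -> 1, no hardware change
 *	sde_crtc_vblank(crtc, false);	// 1 -> 0, callbacks removed
 *
 * The power vote is only touched on the 0 -> 1 and 1 -> 0 edges, and is
 * skipped entirely while the crtc is marked as suspended.
 */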
2497
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002498void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
2499{
2500 struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
2501
Alan Kwong163d2612016-11-03 00:56:56 -04002502 SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file);
Lloyd Atkinson4f1c8692016-09-14 14:04:25 -04002503 _sde_crtc_complete_flip(crtc, file);
Lloyd Atkinson5217336c2016-09-15 18:21:18 -04002504}
2505
Clarence Ip7a753bb2016-07-07 11:47:44 -04002506/**
2507 * sde_crtc_install_properties - install all drm properties for crtc
2508 * @crtc: Pointer to drm crtc structure
2509 */
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002510static void sde_crtc_install_properties(struct drm_crtc *crtc,
2511 struct sde_mdss_cfg *catalog)
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002512{
Clarence Ip7a753bb2016-07-07 11:47:44 -04002513 struct sde_crtc *sde_crtc;
2514 struct drm_device *dev;
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002515 struct sde_kms_info *info;
Alan Kwong9aa061c2016-11-06 21:17:12 -05002516 struct sde_kms *sde_kms;
Clarence Ip7a753bb2016-07-07 11:47:44 -04002517
Lloyd Atkinson4f1c8692016-09-14 14:04:25 -04002518 SDE_DEBUG("\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002519
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002520 if (!crtc || !catalog) {
2521 SDE_ERROR("invalid crtc or catalog\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002522 return;
2523 }
2524
2525 sde_crtc = to_sde_crtc(crtc);
2526 dev = crtc->dev;
Alan Kwong9aa061c2016-11-06 21:17:12 -05002527 sde_kms = _sde_crtc_get_kms(crtc);
Clarence Ip7a753bb2016-07-07 11:47:44 -04002528
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002529 info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
2530 if (!info) {
2531 SDE_ERROR("failed to allocate info memory\n");
2532 return;
2533 }
2534
Clarence Ip7a753bb2016-07-07 11:47:44 -04002535 /* range properties */
2536 msm_property_install_range(&sde_crtc->property_info,
Dhaval Patel4e574842016-08-23 15:11:37 -07002537 "input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
2538 SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
2539
2540 msm_property_install_range(&sde_crtc->property_info, "output_fence",
2541 0x0, 0, INR_OPEN_MAX, 0x0, CRTC_PROP_OUTPUT_FENCE);
Clarence Ip1d9728b2016-09-01 11:10:54 -04002542
2543 msm_property_install_range(&sde_crtc->property_info,
2544 "output_fence_offset", 0x0, 0, 1, 0,
2545 CRTC_PROP_OUTPUT_FENCE_OFFSET);
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002546
Alan Kwong9aa061c2016-11-06 21:17:12 -05002547 msm_property_install_range(&sde_crtc->property_info,
2548 "core_clk", 0x0, 0, U64_MAX,
2549 sde_kms->perf.max_core_clk_rate,
2550 CRTC_PROP_CORE_CLK);
2551 msm_property_install_range(&sde_crtc->property_info,
2552 "core_ab", 0x0, 0, U64_MAX,
Dhaval Patel60c25062017-02-21 17:44:05 -08002553 SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
Alan Kwong9aa061c2016-11-06 21:17:12 -05002554 CRTC_PROP_CORE_AB);
2555 msm_property_install_range(&sde_crtc->property_info,
2556 "core_ib", 0x0, 0, U64_MAX,
Dhaval Patel60c25062017-02-21 17:44:05 -08002557 SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA,
Alan Kwong9aa061c2016-11-06 21:17:12 -05002558 CRTC_PROP_CORE_IB);
Alan Kwong4aacd532017-02-04 18:51:33 -08002559 msm_property_install_range(&sde_crtc->property_info,
Alan Kwong8c176bf2017-02-09 19:34:32 -08002560 "mem_ab", 0x0, 0, U64_MAX,
2561 SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
2562 CRTC_PROP_MEM_AB);
2563 msm_property_install_range(&sde_crtc->property_info,
2564 "mem_ib", 0x0, 0, U64_MAX,
2565 SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
2566 CRTC_PROP_MEM_IB);
2567 msm_property_install_range(&sde_crtc->property_info,
Alan Kwong4aacd532017-02-04 18:51:33 -08002568 "rot_prefill_bw", 0, 0, U64_MAX,
2569 catalog->perf.max_bw_high * 1000ULL,
2570 CRTC_PROP_ROT_PREFILL_BW);
Alan Kwong8c176bf2017-02-09 19:34:32 -08002571 msm_property_install_range(&sde_crtc->property_info,
2572 "rot_clk", 0, 0, U64_MAX,
2573 sde_kms->perf.max_core_clk_rate,
2574 CRTC_PROP_ROT_CLK);
Alan Kwong9aa061c2016-11-06 21:17:12 -05002575
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002576 msm_property_install_blob(&sde_crtc->property_info, "capabilities",
2577 DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002578
2579 if (catalog->has_dim_layer) {
2580 msm_property_install_volatile_range(&sde_crtc->property_info,
2581 "dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
2582 }
2583
Lloyd Atkinson8ba47032017-03-22 17:13:32 -04002584 msm_property_install_volatile_range(&sde_crtc->property_info,
2585 "sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
2586
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002587 sde_kms_info_reset(info);
2588
2589 sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
2590 sde_kms_info_add_keyint(info, "max_linewidth",
2591 catalog->max_mixer_width);
2592 sde_kms_info_add_keyint(info, "max_blendstages",
2593 catalog->max_mixer_blendstages);
2594 if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
2595 sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
2596 if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
2597 sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
Jeykumar Sankaran2e655032017-02-04 14:05:45 -08002598
2599 if (sde_is_custom_client()) {
2600 if (catalog->smart_dma_rev == SDE_SSPP_SMART_DMA_V1)
2601 sde_kms_info_add_keystr(info,
2602 "smart_dma_rev", "smart_dma_v1");
2603 if (catalog->smart_dma_rev == SDE_SSPP_SMART_DMA_V2)
2604 sde_kms_info_add_keystr(info,
2605 "smart_dma_rev", "smart_dma_v2");
2606 }
2607
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002608 sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
Alan Kwong2f84f8a2016-12-29 13:07:47 -05002609 if (catalog->perf.max_bw_low)
2610 sde_kms_info_add_keyint(info, "max_bandwidth_low",
2611 catalog->perf.max_bw_low);
2612 if (catalog->perf.max_bw_high)
2613 sde_kms_info_add_keyint(info, "max_bandwidth_high",
2614 catalog->perf.max_bw_high);
2615 if (sde_kms->perf.max_core_clk_rate)
2616 sde_kms_info_add_keyint(info, "max_mdp_clk",
2617 sde_kms->perf.max_core_clk_rate);
Dhaval Patele4a5dda2016-10-13 19:29:30 -07002618 msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
2619 info->data, info->len, CRTC_PROP_INFO);
2620
2621 kfree(info);
Clarence Ip7a753bb2016-07-07 11:47:44 -04002622}
2623
2624/**
2625 * sde_crtc_atomic_set_property - atomically set a crtc drm property
2626 * @crtc: Pointer to drm crtc structure
2627 * @state: Pointer to drm crtc state structure
2628 * @property: Pointer to targeted drm property
2629 * @val: Updated property value
2630 * @Returns: Zero on success
2631 */
2632static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
2633 struct drm_crtc_state *state,
2634 struct drm_property *property,
2635 uint64_t val)
2636{
2637 struct sde_crtc *sde_crtc;
2638 struct sde_crtc_state *cstate;
Clarence Ipcae1bb62016-07-07 12:07:13 -04002639 int idx, ret = -EINVAL;
Clarence Ip7a753bb2016-07-07 11:47:44 -04002640
2641 if (!crtc || !state || !property) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002642 SDE_ERROR("invalid argument(s)\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002643 } else {
2644 sde_crtc = to_sde_crtc(crtc);
2645 cstate = to_sde_crtc_state(state);
2646 ret = msm_property_atomic_set(&sde_crtc->property_info,
2647 cstate->property_values, cstate->property_blobs,
2648 property, val);
Clarence Ipcae1bb62016-07-07 12:07:13 -04002649 if (!ret) {
2650 idx = msm_property_index(&sde_crtc->property_info,
2651 property);
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002652 switch (idx) {
2653 case CRTC_PROP_INPUT_FENCE_TIMEOUT:
Clarence Ipcae1bb62016-07-07 12:07:13 -04002654 _sde_crtc_set_input_fence_timeout(cstate);
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002655 break;
2656 case CRTC_PROP_DIM_LAYER_V1:
2657 _sde_crtc_set_dim_layer_v1(cstate, (void *)val);
2658 break;
Lloyd Atkinson8ba47032017-03-22 17:13:32 -04002659 case CRTC_PROP_ROI_V1:
2660 ret = _sde_crtc_set_roi_v1(state, (void *)val);
2661 break;
Veera Sundaram Sankaran3171ff82017-01-04 14:34:47 -08002662 default:
2663 /* nothing to do */
2664 break;
2665 }
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07002666 } else {
2667 ret = sde_cp_crtc_set_property(crtc,
2668 property, val);
Clarence Ipcae1bb62016-07-07 12:07:13 -04002669 }
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07002670 if (ret)
2671 DRM_ERROR("failed to set the property\n");
Alan Kwongcdb2f282017-03-18 13:42:06 -07002672
2673 SDE_DEBUG("crtc%d %s[%d] <= 0x%llx ret=%d\n", crtc->base.id,
2674 property->name, property->base.id, val, ret);
Clarence Ip7a753bb2016-07-07 11:47:44 -04002675 }
2676
2677 return ret;
2678}
2679
2680/**
2681 * sde_crtc_set_property - set a crtc drm property
2682 * @crtc: Pointer to drm crtc structure
2683 * @property: Pointer to targeted drm property
2684 * @val: Updated property value
2685 * @Returns: Zero on success
2686 */
2687static int sde_crtc_set_property(struct drm_crtc *crtc,
2688 struct drm_property *property, uint64_t val)
2689{
Lloyd Atkinson4f1c8692016-09-14 14:04:25 -04002690 SDE_DEBUG("\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002691
2692 return sde_crtc_atomic_set_property(crtc, crtc->state, property, val);
2693}
2694
2695/**
2696 * sde_crtc_atomic_get_property - retrieve a crtc drm property
2697 * @crtc: Pointer to drm crtc structure
2698 * @state: Pointer to drm crtc state structure
2699 * @property: Pointer to targeted drm property
2700 * @val: Pointer to variable for receiving property value
2701 * @Returns: Zero on success
2702 */
2703static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
2704 const struct drm_crtc_state *state,
2705 struct drm_property *property,
2706 uint64_t *val)
2707{
2708 struct sde_crtc *sde_crtc;
2709 struct sde_crtc_state *cstate;
Clarence Ip24f80662016-06-13 19:05:32 -04002710 int i, ret = -EINVAL;
Clarence Ip7a753bb2016-07-07 11:47:44 -04002711
2712 if (!crtc || !state) {
Dhaval Patelec10fad2016-08-22 14:40:48 -07002713 SDE_ERROR("invalid argument(s)\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002714 } else {
2715 sde_crtc = to_sde_crtc(crtc);
2716 cstate = to_sde_crtc_state(state);
Clarence Ip24f80662016-06-13 19:05:32 -04002717 i = msm_property_index(&sde_crtc->property_info, property);
2718 if (i == CRTC_PROP_OUTPUT_FENCE) {
Dhaval Patel39323d42017-03-01 23:48:24 -08002719 uint32_t offset = sde_crtc_get_property(cstate,
Clarence Ip1d9728b2016-09-01 11:10:54 -04002720 CRTC_PROP_OUTPUT_FENCE_OFFSET);
2721
2722 ret = sde_fence_create(
2723 &sde_crtc->output_fence, val, offset);
2724 if (ret)
2725 SDE_ERROR("fence create failed\n");
Clarence Ip24f80662016-06-13 19:05:32 -04002726 } else {
2727 ret = msm_property_atomic_get(&sde_crtc->property_info,
2728 cstate->property_values,
2729 cstate->property_blobs, property, val);
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07002730 if (ret)
2731 ret = sde_cp_crtc_get_property(crtc,
2732 property, val);
Clarence Ip24f80662016-06-13 19:05:32 -04002733 }
Gopikrishnaiah Anandane0e5e0c2016-05-25 11:05:33 -07002734 if (ret)
2735 DRM_ERROR("get property failed\n");
Clarence Ip7a753bb2016-07-07 11:47:44 -04002736 }
Clarence Ip7a753bb2016-07-07 11:47:44 -04002737 return ret;
Narendra Muppalla1b0b3352015-09-29 10:16:51 -07002738}
2739
Alan Kwong67a3f792016-11-01 23:16:53 -04002740#ifdef CONFIG_DEBUG_FS
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002741static int _sde_debugfs_status_show(struct seq_file *s, void *data)
Clarence Ip8f7366c2016-07-05 12:15:26 -04002742{
2743 struct sde_crtc *sde_crtc;
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002744 struct sde_plane_state *pstate = NULL;
Clarence Ip8f7366c2016-07-05 12:15:26 -04002745 struct sde_crtc_mixer *m;
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002746
2747 struct drm_crtc *crtc;
2748 struct drm_plane *plane;
2749 struct drm_display_mode *mode;
2750 struct drm_framebuffer *fb;
2751 struct drm_plane_state *state;
2752
2753 int i, out_width;
Clarence Ip8f7366c2016-07-05 12:15:26 -04002754
2755 if (!s || !s->private)
2756 return -EINVAL;
2757
2758 sde_crtc = s->private;
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002759 crtc = &sde_crtc->base;
2760
2761 mutex_lock(&sde_crtc->crtc_lock);
2762 mode = &crtc->state->adjusted_mode;
2763 out_width = sde_crtc_mixer_width(sde_crtc, mode);
2764
2765 seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
2766 mode->hdisplay, mode->vdisplay);
2767
2768 seq_puts(s, "\n");
2769
Clarence Ip8f7366c2016-07-05 12:15:26 -04002770 for (i = 0; i < sde_crtc->num_mixers; ++i) {
Lloyd Atkinsone7bcdd22016-08-11 10:53:37 -04002771 m = &sde_crtc->mixers[i];
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002772 if (!m->hw_lm)
2773 seq_printf(s, "\tmixer[%d] has no lm\n", i);
2774 else if (!m->hw_ctl)
2775 seq_printf(s, "\tmixer[%d] has no ctl\n", i);
2776 else
2777 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
2778 m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
2779 out_width, mode->vdisplay);
Clarence Ip8f7366c2016-07-05 12:15:26 -04002780 }
Dhaval Patel44f12472016-08-29 12:19:47 -07002781
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002782 seq_puts(s, "\n");
Dhaval Patel48c76022016-09-01 17:51:23 -07002783
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002784 drm_atomic_crtc_for_each_plane(plane, crtc) {
2785 pstate = to_sde_plane_state(plane->state);
2786 state = plane->state;
2787
2788 if (!pstate || !state)
2789 continue;
2790
2791 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
2792 pstate->stage);
2793
2794 if (plane->state->fb) {
2795 fb = plane->state->fb;
2796
2797 seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u bpp:%d\n",
2798 fb->base.id, (char *) &fb->pixel_format,
2799 fb->width, fb->height, fb->bits_per_pixel);
2800
2801 seq_puts(s, "\t");
2802 for (i = 0; i < ARRAY_SIZE(fb->modifier); i++)
2803 seq_printf(s, "modifier[%d]:%8llu ", i,
2804 fb->modifier[i]);
2805 seq_puts(s, "\n");
2806
2807 seq_puts(s, "\t");
2808 for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
2809 seq_printf(s, "pitches[%d]:%8u ", i,
2810 fb->pitches[i]);
2811 seq_puts(s, "\n");
2812
2813 seq_puts(s, "\t");
2814 for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
2815 seq_printf(s, "offsets[%d]:%8u ", i,
2816 fb->offsets[i]);
Dhaval Patel48c76022016-09-01 17:51:23 -07002817 seq_puts(s, "\n");
2818 }
Dhaval Patel3fbe6bf2016-10-20 20:00:41 -07002819
2820 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
2821 state->src_x, state->src_y, state->src_w, state->src_h);
2822
2823 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
2824 state->crtc_x, state->crtc_y, state->crtc_w,
2825 state->crtc_h);
2826 seq_puts(s, "\n");
	}

	if (sde_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), sde_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? DIV_ROUND_CLOSEST(
				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums\n",
				fps,
				sde_crtc->vblank_cb_count,
				ktime_to_ms(diff));

		/* reset time & count for next measurement */
		sde_crtc->vblank_cb_count = 0;
		sde_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	seq_printf(s, "vblank_refcount:%d\n",
			atomic_read(&sde_crtc->vblank_refcount));

	mutex_unlock(&sde_crtc->crtc_lock);

	return 0;
}

static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _sde_debugfs_status_show, inode->i_private);
}

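/*
 * _sde_crtc_misr_setup - debugfs write handler for the "misr_data" entry
 *
 * Parses "<enable> <frame_count>" from the user buffer and programs MISR
 * collection on every mixer owned by this crtc, keeping the hardware
 * powered on for the duration of the register writes.
 */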
static ssize_t _sde_crtc_misr_setup(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_mixer *m;
	int i = 0, rc;
	char buf[MISR_BUFF_SIZE + 1];
	u32 frame_count, enable;
	size_t buff_copy;

	if (!file || !file->private_data)
		return -EINVAL;

	sde_crtc = file->private_data;
	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
	if (copy_from_user(buf, user_buf, buff_copy)) {
		SDE_ERROR("buffer copy failed\n");
		return -EINVAL;
	}

	buf[buff_copy] = 0; /* end of string */

	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
		return -EINVAL;

	rc = _sde_crtc_power_enable(sde_crtc, true);
	if (rc)
		return rc;

	mutex_lock(&sde_crtc->crtc_lock);
	sde_crtc->misr_enable = enable;
	for (i = 0; i < sde_crtc->num_mixers; ++i) {
		m = &sde_crtc->mixers[i];
		if (!m->hw_lm)
			continue;

		m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
	}
	mutex_unlock(&sde_crtc->crtc_lock);
	_sde_crtc_power_enable(sde_crtc, false);

	return count;
}

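/*
 * _sde_crtc_misr_read - debugfs read handler for the "misr_data" entry
 *
 * Reports "disabled" when MISR collection is off; otherwise dumps the
 * latest MISR signature collected from each mixer.
 */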
static ssize_t _sde_crtc_misr_read(struct file *file,
		char __user *user_buff, size_t count, loff_t *ppos)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_mixer *m;
	int i = 0, rc;
	ssize_t len = 0;
	char buf[MISR_BUFF_SIZE + 1] = {'\0'};

	if (*ppos)
		return 0;

	if (!file || !file->private_data)
		return -EINVAL;

	sde_crtc = file->private_data;
	rc = _sde_crtc_power_enable(sde_crtc, true);
	if (rc)
		return rc;

	mutex_lock(&sde_crtc->crtc_lock);
	if (!sde_crtc->misr_enable) {
		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
			"disabled\n");
		goto buff_check;
	}

	for (i = 0; i < sde_crtc->num_mixers; ++i) {
		m = &sde_crtc->mixers[i];
		if (!m->hw_lm)
			continue;

		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
					m->hw_lm->idx - LM_0);
		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
					m->hw_lm->ops.collect_misr(m->hw_lm));
	}

buff_check:
	if (count <= len) {
		len = 0;
		goto end;
	}

	if (copy_to_user(user_buff, buf, len)) {
		len = -EFAULT;
		goto end;
	}

	*ppos += len; /* increase offset */

end:
	mutex_unlock(&sde_crtc->crtc_lock);
	_sde_crtc_power_enable(sde_crtc, false);
	return len;
}

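/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate a single_open() based
 * file_operations structure named <prefix>_fops around a seq_file show
 * function named <prefix>_show.
 */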
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}

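/*
 * sde_crtc_debugfs_state_show - debugfs "state" entry; dumps connector
 * count, client type, interface mode, the current perf votes (bandwidth,
 * core clock, per-pipe ib) and the crtc state's resource pool contents.
 */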
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
	struct sde_crtc_res *res;

	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
	seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			cstate->cur_perf.max_per_pipe_ib);

	seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
	list_for_each_entry(res, &cstate->rp.res_list, list)
		seq_printf(s, "0x%x/0x%llx/%pK/%d ",
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
	seq_puts(s, "\n");

	return 0;
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);

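/*
 * _sde_crtc_init_debugfs - create the per-crtc debugfs directory and its
 * "status", "state" and "misr_data" entries under the drm minor's
 * debugfs root.
 */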
static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct sde_crtc *sde_crtc;
	struct sde_kms *sde_kms;

	static const struct file_operations debugfs_status_fops = {
		.open = _sde_debugfs_status_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};
	static const struct file_operations debugfs_misr_fops = {
		.open = simple_open,
		.read = _sde_crtc_misr_read,
		.write = _sde_crtc_misr_setup,
	};

	if (!crtc)
		return -EINVAL;
	sde_crtc = to_sde_crtc(crtc);

	sde_kms = _sde_crtc_get_kms(crtc);
	if (!sde_kms)
		return -EINVAL;

	sde_crtc->debugfs_root = debugfs_create_dir(sde_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!sde_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0444,
			sde_crtc->debugfs_root,
			sde_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0644,
			sde_crtc->debugfs_root,
			&sde_crtc->base,
			&sde_crtc_debugfs_state_fops);
	debugfs_create_file("misr_data", 0644, sde_crtc->debugfs_root,
			sde_crtc, &debugfs_misr_fops);

	return 0;
}

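/*
 * _sde_crtc_destroy_debugfs - remove the per-crtc debugfs directory and
 * everything beneath it.
 */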
static void _sde_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
	struct sde_crtc *sde_crtc;

	if (!crtc)
		return;
	sde_crtc = to_sde_crtc(crtc);
	debugfs_remove_recursive(sde_crtc->debugfs_root);
}
#else
static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}

static void _sde_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
}
#endif /* CONFIG_DEBUG_FS */

static int sde_crtc_late_register(struct drm_crtc *crtc)
{
	return _sde_crtc_init_debugfs(crtc);
}

static void sde_crtc_early_unregister(struct drm_crtc *crtc)
{
	_sde_crtc_destroy_debugfs(crtc);
}

static const struct drm_crtc_funcs sde_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = sde_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = sde_crtc_set_property,
	.atomic_set_property = sde_crtc_atomic_set_property,
	.atomic_get_property = sde_crtc_atomic_get_property,
	.reset = sde_crtc_reset,
	.atomic_duplicate_state = sde_crtc_duplicate_state,
	.atomic_destroy_state = sde_crtc_destroy_state,
	.late_register = sde_crtc_late_register,
	.early_unregister = sde_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
	.mode_fixup = sde_crtc_mode_fixup,
	.disable = sde_crtc_disable,
	.enable = sde_crtc_enable,
	.atomic_check = sde_crtc_atomic_check,
	.atomic_begin = sde_crtc_atomic_begin,
	.atomic_flush = sde_crtc_atomic_flush,
};

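/*
 * _sde_crtc_event_cb - kthread worker callback for queued crtc events;
 * invokes the user callback, then returns the event node to the private
 * free list under the event spinlock.
 */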
static void _sde_crtc_event_cb(struct kthread_work *work)
{
	struct sde_crtc_event *event;
	struct sde_crtc *sde_crtc;
	unsigned long irq_flags;

	if (!work) {
		SDE_ERROR("invalid work item\n");
		return;
	}

	event = container_of(work, struct sde_crtc_event, kt_work);

	/* set sde_crtc to NULL for static work structures */
	sde_crtc = event->sde_crtc;
	if (!sde_crtc)
		return;

	if (event->cb_func)
		event->cb_func(&sde_crtc->base, event->usr);

	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
	list_add_tail(&event->list, &sde_crtc->event_free_list);
	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
}

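/*
 * sde_crtc_event_queue - queue an event callback for deferred execution
 * on the crtc's event kthread. Event nodes come from a small private
 * cache so this can be called from ISR context; returns -ENOMEM when the
 * cache is exhausted.
 */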
int sde_crtc_event_queue(struct drm_crtc *crtc,
		void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
{
	unsigned long irq_flags;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_event *event = NULL;

	if (!crtc || !func)
		return -EINVAL;
	sde_crtc = to_sde_crtc(crtc);

	if (!sde_crtc->event_thread)
		return -EINVAL;
	/*
	 * Obtain an event struct from the private cache. This event
	 * queue may be called from ISR contexts, so use a private
	 * cache to avoid calling any memory allocation functions.
	 */
	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
	if (!list_empty(&sde_crtc->event_free_list)) {
		event = list_first_entry(&sde_crtc->event_free_list,
				struct sde_crtc_event, list);
		list_del_init(&event->list);
	}
	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);

	if (!event)
		return -ENOMEM;

	/* populate event node */
	event->sde_crtc = sde_crtc;
	event->cb_func = func;
	event->usr = usr;

	/* queue new event request */
	kthread_init_work(&event->kt_work, _sde_crtc_event_cb);
	kthread_queue_work(&sde_crtc->event_worker, &event->kt_work);

	return 0;
}

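/*
 * _sde_crtc_init_events - initialize the event free-list cache, its
 * spinlock, and the dedicated kthread worker used to run queued event
 * callbacks.
 */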
static int _sde_crtc_init_events(struct sde_crtc *sde_crtc)
{
	int i, rc = 0;

	if (!sde_crtc) {
		SDE_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	spin_lock_init(&sde_crtc->event_lock);

	INIT_LIST_HEAD(&sde_crtc->event_free_list);
	for (i = 0; i < SDE_CRTC_MAX_EVENT_COUNT; ++i)
		list_add_tail(&sde_crtc->event_cache[i].list,
				&sde_crtc->event_free_list);

	kthread_init_worker(&sde_crtc->event_worker);
	sde_crtc->event_thread = kthread_run(kthread_worker_fn,
			&sde_crtc->event_worker, "crtc_event:%d",
			sde_crtc->base.base.id);

	if (IS_ERR_OR_NULL(sde_crtc->event_thread)) {
		SDE_ERROR("failed to create event thread\n");
		rc = PTR_ERR(sde_crtc->event_thread);
		sde_crtc->event_thread = NULL;
	}

	return rc;
}

/* initialize crtc */
struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
{
	struct drm_crtc *crtc = NULL;
	struct sde_crtc *sde_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct sde_kms *kms = NULL;
	int i, rc;

	priv = dev->dev_private;
	kms = to_sde_kms(priv->kms);

	sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
	if (!sde_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &sde_crtc->base;
	crtc->dev = dev;
	atomic_set(&sde_crtc->vblank_refcount, 0);

	mutex_init(&sde_crtc->crtc_lock);
	spin_lock_init(&sde_crtc->spin_lock);
	atomic_set(&sde_crtc->frame_pending, 0);

	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
	INIT_LIST_HEAD(&sde_crtc->user_event_list);
	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
		list_add(&sde_crtc->frame_events[i].list,
				&sde_crtc->frame_event_list);
		kthread_init_work(&sde_crtc->frame_events[i].work,
				sde_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
	plane->crtc = crtc;

	/* save user friendly CRTC name for later */
	snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	rc = _sde_crtc_init_events(sde_crtc);
	if (rc) {
		drm_crtc_cleanup(crtc);
		kfree(sde_crtc);
		return ERR_PTR(rc);
	}

	/* initialize output fence support */
	sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);

	/* create CRTC properties */
	msm_property_init(&sde_crtc->property_info, &crtc->base, dev,
			priv->crtc_property, sde_crtc->property_data,
			CRTC_PROP_COUNT, CRTC_PROP_BLOBCOUNT,
			sizeof(struct sde_crtc_state));

	sde_crtc_install_properties(crtc, kms->catalog);

	/* Install color processing properties */
	sde_cp_crtc_init(crtc);
	sde_cp_crtc_install_properties(crtc);

	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
	return crtc;
}

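/*
 * _sde_crtc_event_enable - look up the requested custom event in the
 * supported-event table, allocate an irq_info node for it, register the
 * interrupt if the crtc is currently enabled, and track the node on the
 * crtc's user_event_list.
 */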
static int _sde_crtc_event_enable(struct sde_kms *kms,
		struct drm_crtc *crtc_drm, u32 event)
{
	struct sde_crtc *crtc = NULL;
	struct sde_crtc_irq_info *node;
	struct msm_drm_private *priv;
	unsigned long flags;
	bool found = false;
	int ret, i = 0;

	crtc = to_sde_crtc(crtc_drm);
	spin_lock_irqsave(&crtc->spin_lock, flags);
	list_for_each_entry(node, &crtc->user_event_list, list) {
		if (node->event == event) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&crtc->spin_lock, flags);

	/* event already enabled */
	if (found)
		return 0;

	node = NULL;
	for (i = 0; i < ARRAY_SIZE(custom_events); i++) {
		if (custom_events[i].event == event &&
			custom_events[i].func) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node)
				return -ENOMEM;
			INIT_LIST_HEAD(&node->list);
			node->func = custom_events[i].func;
			node->event = event;
			break;
		}
	}

	if (!node) {
		SDE_ERROR("unsupported event %x\n", event);
		return -EINVAL;
	}

	priv = kms->dev->dev_private;
	ret = 0;
	if (crtc_drm->enabled) {
		sde_power_resource_enable(&priv->phandle, kms->core_client,
				true);
		ret = node->func(crtc_drm, true, &node->irq);
		sde_power_resource_enable(&priv->phandle, kms->core_client,
				false);
	}

	if (!ret) {
		spin_lock_irqsave(&crtc->spin_lock, flags);
		list_add_tail(&node->list, &crtc->user_event_list);
		spin_unlock_irqrestore(&crtc->spin_lock, flags);
	} else {
		kfree(node);
	}

	return ret;
}

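/*
 * _sde_crtc_event_disable - remove the custom event node from the crtc's
 * user_event_list and, if the crtc is still enabled, de-register its
 * interrupt with the hardware powered on.
 */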
static int _sde_crtc_event_disable(struct sde_kms *kms,
		struct drm_crtc *crtc_drm, u32 event)
{
	struct sde_crtc *crtc = NULL;
	struct sde_crtc_irq_info *node = NULL;
	struct msm_drm_private *priv;
	unsigned long flags;
	bool found = false;
	int ret;

	crtc = to_sde_crtc(crtc_drm);
	spin_lock_irqsave(&crtc->spin_lock, flags);
	list_for_each_entry(node, &crtc->user_event_list, list) {
		if (node->event == event) {
			list_del(&node->list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&crtc->spin_lock, flags);

	/* event already disabled */
	if (!found)
		return 0;

	/*
	 * If the crtc is disabled, its interrupts have already been
	 * cleared; the node was removed from the list above, so there
	 * is no need to disable/de-register anything.
	 */
	if (!crtc_drm->enabled) {
		kfree(node);
		return 0;
	}

	priv = kms->dev->dev_private;
	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
	ret = node->func(crtc_drm, false, &node->irq);
	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
	kfree(node);
	return ret;
}

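/*
 * sde_crtc_register_custom_event - entry point used to enable or disable
 * one of the custom crtc events listed in custom_events[].
 */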
int sde_crtc_register_custom_event(struct sde_kms *kms,
		struct drm_crtc *crtc_drm, u32 event, bool en)
{
	struct sde_crtc *crtc = NULL;
	int ret;

	crtc = to_sde_crtc(crtc_drm);
	if (!crtc || !kms || !kms->dev) {
		DRM_ERROR("invalid sde_crtc %pK kms %pK dev %pK\n", crtc,
			kms, ((kms) ? (kms->dev) : NULL));
		return -EINVAL;
	}

	if (en)
		ret = _sde_crtc_event_enable(kms, crtc_drm, event);
	else
		ret = _sde_crtc_event_disable(kms, crtc_drm, event);

	return ret;
}