/*
 * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"sde-wb:[%s] " fmt, __func__

#include <linux/jiffies.h>
#include <linux/debugfs.h>

#include "sde_encoder_phys.h"
#include "sde_formats.h"
#include "sde_hw_top.h"
#include "sde_hw_interrupts.h"
#include "sde_wb.h"

/* wait for at most two vsyncs at the lowest refresh rate (24 Hz) */
#define WAIT_TIMEOUT_MSEC		84

#define to_sde_encoder_phys_wb(x) \
	container_of(x, struct sde_encoder_phys_wb, base)

#define DEV(phy_enc) (phy_enc->parent->dev)

/**
 * sde_encoder_phys_wb_is_master - report wb always as master encoder
 * @phys_enc: Pointer to physical encoder
 */
static bool sde_encoder_phys_wb_is_master(struct sde_encoder_phys *phys_enc)
{
	return true;
}

/**
 * sde_encoder_phys_wb_get_intr_type - get interrupt type based on block mode
 * @hw_wb: Pointer to h/w writeback driver
 */
static enum sde_intr_type sde_encoder_phys_wb_get_intr_type(
		struct sde_hw_wb *hw_wb)
{
	return (hw_wb->caps->features & BIT(SDE_WB_BLOCK_MODE)) ?
			SDE_IRQ_TYPE_WB_ROT_COMP : SDE_IRQ_TYPE_WB_WFD_COMP;
}

/**
 * sde_encoder_phys_wb_set_traffic_shaper - set traffic shaper for writeback
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_set_traffic_shaper(
		struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb_cfg *wb_cfg = &wb_enc->wb_cfg;

	/* traffic shaper is only enabled for rotator */
	wb_cfg->ts_cfg.en = false;
}

/**
 * sde_encoder_phys_setup_cdm - setup chroma down block
 * @phys_enc: Pointer to physical encoder
 * @fb: Pointer to output framebuffer
 * @format: Output format
 * @wb_roi: Pointer to output region of interest
 */
void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
		struct drm_framebuffer *fb, const struct sde_format *format,
		struct sde_rect *wb_roi)
{
	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
	struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
	int ret;

	if (!SDE_FORMAT_IS_YUV(format)) {
		SDE_DEBUG("[cdm_disable fmt:%x]\n",
				format->base.pixel_format);

		if (hw_cdm && hw_cdm->ops.disable)
			hw_cdm->ops.disable(hw_cdm);

		return;
	}

	memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));

	cdm_cfg->output_width = wb_roi->w;
	cdm_cfg->output_height = wb_roi->h;
	cdm_cfg->output_fmt = format;
	cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
	cdm_cfg->output_bit_depth = CDM_CDWN_OUTPUT_8BIT;

	/* enable 10 bit logic */
	switch (cdm_cfg->output_fmt->chroma_sample) {
	case SDE_CHROMA_RGB:
		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	case SDE_CHROMA_H2V1:
		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	case SDE_CHROMA_420:
		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
		break;
	case SDE_CHROMA_H1V2:
	default:
		SDE_ERROR("unsupported chroma sampling type\n");
		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	}

	SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
			cdm_cfg->output_width,
			cdm_cfg->output_height,
			cdm_cfg->output_fmt->base.pixel_format,
			cdm_cfg->output_type,
			cdm_cfg->output_bit_depth,
			cdm_cfg->h_cdwn_type,
			cdm_cfg->v_cdwn_type);

	if (hw_cdm && hw_cdm->ops.setup_cdwn) {
		ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
		if (ret < 0) {
			SDE_ERROR("failed to setup CDM %d\n", ret);
			return;
		}
	}

	if (hw_cdm && hw_cdm->ops.enable) {
		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
		if (ret < 0) {
			SDE_ERROR("failed to enable CDM %d\n", ret);
			return;
		}
	}
}

/**
 * sde_encoder_phys_wb_setup_fb - setup output framebuffer
 * @phys_enc: Pointer to physical encoder
 * @fb: Pointer to output framebuffer
 * @wb_roi: Pointer to output region of interest
 */
static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
		struct drm_framebuffer *fb, struct sde_rect *wb_roi)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct sde_hw_wb_cfg *wb_cfg = &wb_enc->wb_cfg;
	const struct msm_format *format;
	int ret, mmu_id;

	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));

	wb_cfg->intf_mode = INTF_MODE_WB_LINE;
	wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
	mmu_id = (wb_cfg->is_secure) ?
			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];

	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);

	format = msm_framebuffer_format(fb);
	if (!format) {
		SDE_DEBUG("invalid format for fb\n");
		return;
	}

	wb_cfg->dest.format = sde_get_sde_format_ext(
			format->pixel_format,
			fb->modifier,
			drm_format_num_planes(fb->pixel_format));
	if (!wb_cfg->dest.format) {
		/* this error should be detected during atomic_check */
		SDE_ERROR("failed to get format %x\n", format->pixel_format);
		return;
	}

	ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
			&wb_cfg->dest);
	if (ret) {
		/* this error should be detected during atomic_check */
		SDE_DEBUG("failed to populate layout %d\n", ret);
		return;
	}

	if ((wb_cfg->dest.format->fetch_planes == SDE_PLANE_PLANAR) &&
			(wb_cfg->dest.format->element[0] == C1_B_Cb))
		swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);

	SDE_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
			wb_cfg->dest.plane_addr[0],
			wb_cfg->dest.plane_addr[1],
			wb_cfg->dest.plane_addr[2],
			wb_cfg->dest.plane_addr[3]);
	SDE_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
			wb_cfg->dest.plane_pitch[0],
			wb_cfg->dest.plane_pitch[1],
			wb_cfg->dest.plane_pitch[2],
			wb_cfg->dest.plane_pitch[3]);

	if (hw_wb->ops.setup_outformat)
		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);

	if (hw_wb->ops.setup_outaddress)
		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
}

/**
 * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct sde_hw_intf_cfg *intf_cfg = &wb_enc->intf_cfg;

	memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));

	intf_cfg->intf = SDE_NONE;
	intf_cfg->wb = hw_wb->idx;

	if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg)
		phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
				intf_cfg);
}

/**
 * sde_encoder_phys_wb_atomic_check - verify and fixup given atomic states
 * @phys_enc: Pointer to physical encoder
 * @crtc_state: Pointer to CRTC atomic state
 * @conn_state: Pointer to connector atomic state
 */
static int sde_encoder_phys_wb_atomic_check(
		struct sde_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	const struct sde_wb_cfg *wb_cfg = hw_wb->caps;
	struct drm_framebuffer *fb;
	const struct sde_format *fmt;
	struct sde_rect wb_roi;
	const struct drm_display_mode *mode = &crtc_state->mode;
	int rc;

	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
			hw_wb->idx - WB_0, mode->base.id, mode->name,
			mode->hdisplay, mode->vdisplay);

	memset(&wb_roi, 0, sizeof(struct sde_rect));

	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
	if (rc) {
		SDE_ERROR("failed to get roi %d\n", rc);
		return rc;
	}

	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
			wb_roi.w, wb_roi.h);

	fb = sde_wb_connector_state_get_output_fb(conn_state);
	if (!fb) {
		SDE_ERROR("no output framebuffer\n");
		return -EINVAL;
	}

	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
			fb->width, fb->height);

	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
			drm_format_num_planes(fb->pixel_format));
	if (!fmt) {
		SDE_ERROR("unsupported output pixel format:%d\n",
				fb->pixel_format);
		return -EINVAL;
	}

	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
			fb->modifier[0]);

	if (SDE_FORMAT_IS_YUV(fmt) &&
			!(wb_cfg->features & BIT(SDE_WB_YUV_CONFIG))) {
		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
		return -EINVAL;
	}

	if (SDE_FORMAT_IS_UBWC(fmt) &&
			!(wb_cfg->features & BIT(SDE_WB_UBWC_1_0))) {
		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
		return -EINVAL;
	}

	phys_enc->needs_cdm = SDE_FORMAT_IS_YUV(fmt);

	if (wb_roi.w && wb_roi.h) {
		if (wb_roi.w != mode->hdisplay) {
			SDE_ERROR("invalid roi w=%d, mode w=%d\n", wb_roi.w,
					mode->hdisplay);
			return -EINVAL;
		} else if (wb_roi.h != mode->vdisplay) {
			SDE_ERROR("invalid roi h=%d, mode h=%d\n", wb_roi.h,
					mode->vdisplay);
			return -EINVAL;
		} else if (wb_roi.x + wb_roi.w > fb->width) {
			SDE_ERROR("invalid roi x=%d, w=%d, fb w=%d\n",
					wb_roi.x, wb_roi.w, fb->width);
			return -EINVAL;
		} else if (wb_roi.y + wb_roi.h > fb->height) {
			SDE_ERROR("invalid roi y=%d, h=%d, fb h=%d\n",
					wb_roi.y, wb_roi.h, fb->height);
			return -EINVAL;
		} else if (wb_roi.w > wb_cfg->sblk->maxlinewidth) {
			SDE_ERROR("invalid roi w=%d, maxlinewidth=%u\n",
					wb_roi.w, wb_cfg->sblk->maxlinewidth);
			return -EINVAL;
		}
	} else {
		if (wb_roi.x || wb_roi.y) {
			SDE_ERROR("invalid roi x=%d, y=%d\n",
					wb_roi.x, wb_roi.y);
			return -EINVAL;
		} else if (fb->width != mode->hdisplay) {
			SDE_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
					mode->hdisplay);
			return -EINVAL;
		} else if (fb->height != mode->vdisplay) {
			SDE_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
					mode->vdisplay);
			return -EINVAL;
		} else if (fb->width > wb_cfg->sblk->maxlinewidth) {
			SDE_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
					fb->width, wb_cfg->sblk->maxlinewidth);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * sde_encoder_phys_wb_flush - flush hardware update
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_flush(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
	u32 flush_mask = 0;

	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);

	if (!hw_ctl) {
		SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
		return;
	}

	if (hw_ctl->ops.get_bitmask_wb)
		hw_ctl->ops.get_bitmask_wb(hw_ctl, &flush_mask, hw_wb->idx);

	if (hw_ctl->ops.get_bitmask_cdm && hw_cdm)
		hw_ctl->ops.get_bitmask_cdm(hw_ctl, &flush_mask, hw_cdm->idx);

	if (hw_ctl->ops.update_pending_flush)
		hw_ctl->ops.update_pending_flush(hw_ctl, flush_mask);

	SDE_DEBUG("Flushing CTL_ID %d, flush_mask %x, WB %d\n",
			hw_ctl->idx - CTL_0, flush_mask, hw_wb->idx - WB_0);
}

/**
 * sde_encoder_phys_wb_setup - setup writeback encoder
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_setup(
		struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct drm_display_mode mode = phys_enc->cached_mode;
	struct drm_framebuffer *fb;
	struct sde_rect *wb_roi = &wb_enc->wb_roi;

	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
			hw_wb->idx - WB_0, mode.base.id, mode.name,
			mode.hdisplay, mode.vdisplay);

	memset(wb_roi, 0, sizeof(struct sde_rect));

	fb = sde_wb_get_output_fb(wb_enc->wb_dev);
	if (!fb) {
		SDE_DEBUG("no output framebuffer\n");
		return;
	}

	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
			fb->width, fb->height);

	sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
	if (wb_roi->w == 0 || wb_roi->h == 0) {
		wb_roi->x = 0;
		wb_roi->y = 0;
		wb_roi->w = fb->width;
		wb_roi->h = fb->height;
	}

	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi->x, wb_roi->y,
			wb_roi->w, wb_roi->h);

	wb_enc->wb_fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
			drm_format_num_planes(fb->pixel_format));
	if (!wb_enc->wb_fmt) {
		SDE_ERROR("unsupported output pixel format: %d\n",
				fb->pixel_format);
		return;
	}

	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
			fb->modifier[0]);

	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);

	sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);

	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);

	sde_encoder_phys_wb_setup_cdp(phys_enc);
}

/**
 * sde_encoder_phys_wb_unregister_irq - unregister writeback interrupt handler
 * @phys_enc: Pointer to physical encoder
 */
static int sde_encoder_phys_wb_unregister_irq(
		struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;

	if (wb_enc->bypass_irqreg)
		return 0;

	sde_disable_irq(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
	sde_register_irq_callback(phys_enc->sde_kms, wb_enc->irq_idx, NULL);

	SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n",
			hw_wb->idx - WB_0,
			wb_enc->irq_idx);

	return 0;
}

/**
 * sde_encoder_phys_wb_done_irq - writeback interrupt handler
 * @arg: Pointer to writeback encoder
 * @irq_idx: interrupt index
 */
static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
{
	struct sde_encoder_phys_wb *wb_enc = arg;
	struct sde_encoder_phys *phys_enc = &wb_enc->base;
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;

	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
			wb_enc->frame_count);

	complete_all(&wb_enc->wbdone_complete);

	phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
}

/**
 * sde_encoder_phys_wb_register_irq - register writeback interrupt handler
 * @phys_enc: Pointer to physical encoder
 */
static int sde_encoder_phys_wb_register_irq(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct sde_irq_callback irq_cb;
	enum sde_intr_type intr_type;
	int ret = 0;

	if (wb_enc->bypass_irqreg)
		return 0;

	intr_type = sde_encoder_phys_wb_get_intr_type(hw_wb);
	wb_enc->irq_idx = sde_irq_idx_lookup(phys_enc->sde_kms,
			intr_type, hw_wb->idx);
	if (wb_enc->irq_idx < 0) {
		SDE_ERROR(
			"failed to lookup IRQ index for WB_DONE with wb=%d\n",
			hw_wb->idx - WB_0);
		return -EINVAL;
	}

	irq_cb.func = sde_encoder_phys_wb_done_irq;
	irq_cb.arg = wb_enc;
	ret = sde_register_irq_callback(phys_enc->sde_kms, wb_enc->irq_idx,
			&irq_cb);
	if (ret) {
		SDE_ERROR("failed to register IRQ callback WB_DONE\n");
		return ret;
	}

	ret = sde_enable_irq(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
	if (ret) {
		SDE_ERROR(
			"failed to enable IRQ for WB_DONE, wb %d, irq_idx=%d\n",
			hw_wb->idx - WB_0,
			wb_enc->irq_idx);

		/*
		 * Unregister callback on IRQ enable failure, using the
		 * still-valid irq_idx before it is invalidated.
		 */
		sde_register_irq_callback(phys_enc->sde_kms, wb_enc->irq_idx,
				NULL);
		wb_enc->irq_idx = -EINVAL;
		return ret;
	}

	SDE_DEBUG("registered IRQ for wb %d, irq_idx=%d\n",
			hw_wb->idx - WB_0,
			wb_enc->irq_idx);

	return ret;
}

/**
 * sde_encoder_phys_wb_mode_set - set display mode
 * @phys_enc: Pointer to physical encoder
 * @mode: Pointer to requested display mode
 * @adj_mode: Pointer to adjusted display mode
 */
static void sde_encoder_phys_wb_mode_set(
		struct sde_encoder_phys *phys_enc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adj_mode)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_rm *rm = &phys_enc->sde_kms->rm;
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct sde_rm_hw_iter iter;
	int i, instance;

	phys_enc->cached_mode = *adj_mode;
	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;

	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
			hw_wb->idx - WB_0, mode->base.id,
			mode->name, mode->hdisplay, mode->vdisplay);

	/* Retrieve previously allocated HW Resources. CTL shouldn't fail */
	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
	for (i = 0; i <= instance; i++) {
		sde_rm_get_hw(rm, &iter);
		if (i == instance)
			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
	}

	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
		SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl));
		phys_enc->hw_ctl = NULL;
		return;
	}

	/* CDM is optional */
	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
	for (i = 0; i <= instance; i++) {
		sde_rm_get_hw(rm, &iter);
		if (i == instance)
			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
	}

	if (IS_ERR_OR_NULL(phys_enc->hw_cdm)) {
		if (phys_enc->needs_cdm) {
			SDE_ERROR("CDM required but not allocated: %ld\n",
					PTR_ERR(phys_enc->hw_cdm));
			phys_enc->hw_ctl = NULL;
		}
		phys_enc->hw_cdm = NULL;
	}
}

/**
 * sde_encoder_phys_wb_control_vblank_irq - Control vblank interrupt
 * @phys_enc: Pointer to physical encoder
 * @enable: Enable interrupt
 */
static int sde_encoder_phys_wb_control_vblank_irq(
		struct sde_encoder_phys *phys_enc,
		bool enable)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	int ret = 0;

	SDE_DEBUG("[wb:%d,%d]\n", hw_wb->idx - WB_0, enable);

	if (enable)
		ret = sde_encoder_phys_wb_register_irq(phys_enc);
	else
		ret = sde_encoder_phys_wb_unregister_irq(phys_enc);

	if (ret)
		SDE_ERROR("control vblank irq error %d, enable %d\n", ret,
				enable);

	return ret;
}

/**
 * sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed
 * @phys_enc: Pointer to physical encoder
 */
static int sde_encoder_phys_wb_wait_for_commit_done(
		struct sde_encoder_phys *phys_enc)
{
	unsigned long ret;
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	u32 irq_status;
	u64 wb_time = 0;
	int rc = 0;

	/* Return EWOULDBLOCK since we know the wait isn't necessary */
	if (WARN_ON(phys_enc->enable_state != SDE_ENC_ENABLED))
		return -EWOULDBLOCK;

	MSM_EVT(DEV(phys_enc), wb_enc->frame_count, 0);

	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
			msecs_to_jiffies(wb_enc->wbdone_timeout));

	if (!ret) {
		MSM_EVT(DEV(phys_enc), wb_enc->frame_count, 0);

		irq_status = sde_read_irq(phys_enc->sde_kms,
				wb_enc->irq_idx, true);
		if (irq_status) {
			SDE_DEBUG("wb:%d done but irq not triggered\n",
					wb_enc->wb_dev->wb_idx - WB_0);
			sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
		} else {
			SDE_ERROR("wb:%d kickoff timed out\n",
					wb_enc->wb_dev->wb_idx - WB_0);
			rc = -ETIMEDOUT;
		}
	}

	sde_encoder_phys_wb_unregister_irq(phys_enc);

	if (!rc)
		wb_enc->end_time = ktime_get();

	/* once operation is done, disable traffic shaper */
	if (wb_enc->wb_cfg.ts_cfg.en && wb_enc->hw_wb &&
			wb_enc->hw_wb->ops.setup_trafficshaper) {
		wb_enc->wb_cfg.ts_cfg.en = false;
		wb_enc->hw_wb->ops.setup_trafficshaper(
				wb_enc->hw_wb, &wb_enc->wb_cfg);
	}

	/* remove vote for iommu/clk/bus */
	wb_enc->frame_count++;

	if (!rc) {
		wb_time = (u64)ktime_to_us(wb_enc->end_time) -
				(u64)ktime_to_us(wb_enc->start_time);
		SDE_DEBUG("wb:%d took %llu us\n",
				wb_enc->wb_dev->wb_idx - WB_0, wb_time);
	}

	MSM_EVT(DEV(phys_enc), wb_enc->frame_count, wb_time);

	return rc;
}

/**
 * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
 * @phys_enc: Pointer to physical encoder
 * @need_to_wait: Wait for next submission
 */
static void sde_encoder_phys_wb_prepare_for_kickoff(
		struct sde_encoder_phys *phys_enc,
		bool *need_to_wait)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	int ret;

	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
			wb_enc->kickoff_count);

	*need_to_wait = false;

	reinit_completion(&wb_enc->wbdone_complete);

	ret = sde_encoder_phys_wb_register_irq(phys_enc);
	if (ret) {
		SDE_ERROR("failed to register irq %d\n", ret);
		return;
	}

	wb_enc->kickoff_count++;

	/* set OT limit & enable traffic shaper */
	sde_encoder_phys_wb_setup(phys_enc);

	sde_encoder_phys_wb_flush(phys_enc);

	/* vote for iommu/clk/bus */
	wb_enc->start_time = ktime_get();

	MSM_EVT(DEV(phys_enc), *need_to_wait, wb_enc->kickoff_count);
}

/**
 * sde_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_handle_post_kickoff(
		struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);

	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);

	MSM_EVT(DEV(phys_enc), 0, 0);
}

/**
 * sde_encoder_phys_wb_enable - enable writeback encoder
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_enable(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
	struct drm_connector *connector;

	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);

	/* find associated writeback connector */
	drm_for_each_connector(connector, phys_enc->parent->dev) {
		if (connector->encoder == phys_enc->parent)
			break;
	}
	if (!connector || connector->encoder != phys_enc->parent) {
		SDE_ERROR("failed to find writeback connector\n");
		return;
	}
	wb_enc->wb_dev = sde_wb_connector_get_wb(connector);

	phys_enc->enable_state = SDE_ENC_ENABLED;
}

/**
 * sde_encoder_phys_wb_disable - disable writeback encoder
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;

	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);

	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
		SDE_ERROR("encoder is already disabled\n");
		return;
	}

	if (wb_enc->frame_count != wb_enc->kickoff_count) {
		SDE_DEBUG("[wait_for_done: wb:%d, frame:%u, kickoff:%u]\n",
				hw_wb->idx - WB_0, wb_enc->frame_count,
				wb_enc->kickoff_count);
		sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
	}

	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
		SDE_DEBUG_DRIVER("[cdm_disable]\n");
		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
	}

	phys_enc->enable_state = SDE_ENC_DISABLED;
}

/**
 * sde_encoder_phys_wb_get_hw_resources - get hardware resources
 * @phys_enc: Pointer to physical encoder
 * @hw_res: Pointer to encoder resources
 * @conn_state: Pointer to connector atomic state
 */
static void sde_encoder_phys_wb_get_hw_resources(
		struct sde_encoder_phys *phys_enc,
		struct sde_encoder_hw_resources *hw_res,
		struct drm_connector_state *conn_state)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;

	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
	hw_res->wbs[hw_wb->idx - WB_0] = INTF_MODE_WB_LINE;
	hw_res->needs_cdm = phys_enc->needs_cdm;
}

/**
 * sde_encoder_phys_wb_needs_ctl_start - Whether encoder needs ctl_start
 * @phys_enc: Pointer to physical encoder
 * @Return: Whether encoder needs ctl_start
 */
static bool sde_encoder_phys_wb_needs_ctl_start(
		struct sde_encoder_phys *phys_enc)
{
	return true;
}

#ifdef CONFIG_DEBUG_FS
/**
 * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
 * @phys_enc: Pointer to physical encoder
 * @kms: Pointer to SDE KMS object
 */
static int sde_encoder_phys_wb_init_debugfs(
		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);

	if (!phys_enc || !kms || !wb_enc->hw_wb)
		return -EINVAL;

	snprintf(wb_enc->wb_name, ARRAY_SIZE(wb_enc->wb_name), "encoder_wb%d",
			wb_enc->hw_wb->idx - WB_0);

	wb_enc->debugfs_root =
		debugfs_create_dir(wb_enc->wb_name,
				sde_debugfs_get_root(kms));
	if (!wb_enc->debugfs_root) {
		SDE_ERROR("failed to create debugfs\n");
		return -ENOMEM;
	}

	if (!debugfs_create_u32("wbdone_timeout", 0644,
			wb_enc->debugfs_root, &wb_enc->wbdone_timeout)) {
		SDE_ERROR("failed to create debugfs/wbdone_timeout\n");
		return -ENOMEM;
	}

	if (!debugfs_create_u32("bypass_irqreg", 0644,
			wb_enc->debugfs_root, &wb_enc->bypass_irqreg)) {
		SDE_ERROR("failed to create debugfs/bypass_irqreg\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * sde_encoder_phys_wb_destroy_debugfs - destroy writeback encoder debugfs
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_destroy_debugfs(
		struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);

	if (!phys_enc)
		return;

	debugfs_remove_recursive(wb_enc->debugfs_root);
}
#else
static int sde_encoder_phys_wb_init_debugfs(
		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
{
	/* return int to match the debugfs variant, since callers check it */
	return 0;
}
static void sde_encoder_phys_wb_destroy_debugfs(
		struct sde_encoder_phys *phys_enc)
{
}
#endif

/**
 * sde_encoder_phys_wb_destroy - destroy writeback encoder
 * @phys_enc: Pointer to physical encoder
 */
static void sde_encoder_phys_wb_destroy(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;

	if (!phys_enc)
		return;

	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);

	sde_encoder_phys_wb_destroy_debugfs(phys_enc);

	kfree(wb_enc);
}

/**
 * sde_encoder_phys_wb_init_ops - initialize writeback operations
 * @ops: Pointer to encoder operation table
 */
static void sde_encoder_phys_wb_init_ops(struct sde_encoder_phys_ops *ops)
{
	ops->is_master = sde_encoder_phys_wb_is_master;
	ops->mode_set = sde_encoder_phys_wb_mode_set;
	ops->enable = sde_encoder_phys_wb_enable;
	ops->disable = sde_encoder_phys_wb_disable;
	ops->destroy = sde_encoder_phys_wb_destroy;
	ops->atomic_check = sde_encoder_phys_wb_atomic_check;
	ops->get_hw_resources = sde_encoder_phys_wb_get_hw_resources;
	ops->control_vblank_irq = sde_encoder_phys_wb_control_vblank_irq;
	ops->wait_for_commit_done = sde_encoder_phys_wb_wait_for_commit_done;
	ops->prepare_for_kickoff = sde_encoder_phys_wb_prepare_for_kickoff;
	ops->handle_post_kickoff = sde_encoder_phys_wb_handle_post_kickoff;
	ops->needs_ctl_start = sde_encoder_phys_wb_needs_ctl_start;
}

/**
 * sde_encoder_phys_wb_init - initialize writeback encoder
 * @p: Pointer to init info structure with initialization params
 */
struct sde_encoder_phys *sde_encoder_phys_wb_init(
		struct sde_enc_phys_init_params *p)
{
	struct sde_encoder_phys *phys_enc;
	struct sde_encoder_phys_wb *wb_enc;
	struct sde_hw_mdp *hw_mdp;
	int ret = 0;

	SDE_DEBUG("\n");

	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
	if (!wb_enc) {
		ret = -ENOMEM;
		goto fail_alloc;
	}
	wb_enc->irq_idx = -EINVAL;
	wb_enc->wbdone_timeout = WAIT_TIMEOUT_MSEC;
	init_completion(&wb_enc->wbdone_complete);

	phys_enc = &wb_enc->base;

	if (p->sde_kms->vbif[VBIF_NRT]) {
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
	} else {
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
	}

	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
	if (IS_ERR_OR_NULL(hw_mdp)) {
		ret = PTR_ERR(hw_mdp);
		SDE_ERROR("failed to init hw_top: %d\n", ret);
		goto fail_mdp_init;
	}
	phys_enc->hw_mdptop = hw_mdp;

	/*
	 * hw_wb resource permanently assigned to this encoder
	 * Other resources allocated at atomic commit time by use case
	 */
	if (p->wb_idx != SDE_NONE) {
		struct sde_rm_hw_iter iter;

		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_WB);
		while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
			struct sde_hw_wb *hw_wb = (struct sde_hw_wb *)iter.hw;

			if (hw_wb->idx == p->wb_idx) {
				wb_enc->hw_wb = hw_wb;
				break;
			}
		}

		if (!wb_enc->hw_wb) {
			ret = -EINVAL;
			SDE_ERROR("failed to init hw_wb%d\n", p->wb_idx - WB_0);
			goto fail_wb_init;
		}
	} else {
		ret = -EINVAL;
		SDE_ERROR("invalid wb_idx\n");
		goto fail_wb_check;
	}

	sde_encoder_phys_wb_init_ops(&phys_enc->ops);
	phys_enc->parent = p->parent;
	phys_enc->parent_ops = p->parent_ops;
	phys_enc->sde_kms = p->sde_kms;
	phys_enc->split_role = p->split_role;
	spin_lock_init(&phys_enc->spin_lock);

	ret = sde_encoder_phys_wb_init_debugfs(phys_enc, p->sde_kms);
	if (ret) {
		SDE_ERROR("failed to init debugfs %d\n", ret);
		goto fail_debugfs_init;
	}

	SDE_DEBUG("Created sde_encoder_phys_wb for wb %d\n",
			wb_enc->hw_wb->idx - WB_0);

	return phys_enc;

fail_debugfs_init:
fail_wb_init:
fail_wb_check:
fail_mdp_init:
	kfree(wb_enc);
fail_alloc:
	return ERR_PTR(ret);
}