blob: 985b276e6b2504b1d6066f6a73e46cd773e0581a [file] [log] [blame]
Emily Dengc6e14f42016-08-08 11:30:50 +08001/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "drmP.h"
24#include "amdgpu.h"
25#include "amdgpu_pm.h"
26#include "amdgpu_i2c.h"
27#include "atom.h"
28#include "amdgpu_atombios.h"
29#include "atombios_crtc.h"
30#include "atombios_encoders.h"
31#include "amdgpu_pll.h"
32#include "amdgpu_connectors.h"
33
/* Forward declarations: the function tables near the end of this file are
 * referenced by the *_init paths above their definitions. */
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
36
/**
 * dce_virtual_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * No-op: the virtual display has no scanout engine, so there is
 * never a real vblank to wait for.
 */
static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	/* intentionally empty */
}
49
50static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
51{
52 if (crtc >= adev->mode_info.num_crtc)
53 return 0;
54 else
55 return adev->ddev->vblank[crtc].count;
56}
57
58static void dce_virtual_page_flip(struct amdgpu_device *adev,
59 int crtc_id, u64 crtc_base, bool async)
60{
61 return;
62}
63
64static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
65 u32 *vbl, u32 *position)
66{
67 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
68 return -EINVAL;
69
70 *vbl = 0;
71 *position = 0;
72
73 return 0;
74}
75
76static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
77 enum amdgpu_hpd_id hpd)
78{
79 return true;
80}
81
82static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
83 enum amdgpu_hpd_id hpd)
84{
85 return;
86}
87
88static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
89{
90 return 0;
91}
92
/* A display with no hardware can never hang. */
static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
{
	return false;
}
97
/* Stop MC access asic callback: no-op for the virtual display.
 * Non-static: referenced from other DCE code paths. */
void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
				struct amdgpu_mode_mc_save *save)
{
	/* intentionally empty */
}
/* Resume MC access asic callback: no-op for the virtual display.
 * Non-static: referenced from other DCE code paths. */
void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
				  struct amdgpu_mode_mc_save *save)
{
	/* intentionally empty */
}
108
/* VGA render state: no VGA engine exists, so nothing to enable/disable.
 * Non-static: referenced from other DCE code paths. */
void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
				      bool render)
{
	/* intentionally empty */
}
114
/**
 * dce_virtual_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * No watermarks or line-buffer allocation exist for the virtual
 * display, so this callback does nothing.
 */
static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
{
	/* intentionally empty */
}
127
/* CRTC callbacks for the virtual display.
 * NOTE(review): every hook is NULL.  The DRM core calls some of these
 * (e.g. .destroy on teardown, .set_config from userspace modeset)
 * without NULL checks in many kernel versions -- confirm these paths
 * cannot be reached, or provide real implementations. */
static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
	.cursor_set2 = NULL,
	.cursor_move = NULL,
	.gamma_set = NULL,
	.set_config = NULL,
	.destroy = NULL,
	.page_flip = NULL,
};
136
/* CRTC helper callbacks for the virtual display.
 * NOTE(review): all NULL -- drm_crtc_helper_set_config()/set_mode()
 * invoke several of these (dpms, prepare, commit, mode_set) directly;
 * verify no modeset path reaches this table with NULL entries. */
static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
	.dpms = NULL,
	.mode_fixup = NULL,
	.mode_set = NULL,
	.mode_set_base = NULL,
	.mode_set_base_atomic = NULL,
	.prepare = NULL,
	.commit = NULL,
	.load_lut = NULL,
	.disable = NULL,
};
148
149static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
150{
151 struct amdgpu_crtc *amdgpu_crtc;
152 int i;
153
154 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
155 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
156 if (amdgpu_crtc == NULL)
157 return -ENOMEM;
158
159 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
160
161 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
162 amdgpu_crtc->crtc_id = index;
163 adev->mode_info.crtcs[index] = amdgpu_crtc;
164
165 for (i = 0; i < 256; i++) {
166 amdgpu_crtc->lut_r[i] = i << 2;
167 amdgpu_crtc->lut_g[i] = i << 2;
168 amdgpu_crtc->lut_b[i] = i << 2;
169 }
170
171 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
172 amdgpu_crtc->encoder = NULL;
173 amdgpu_crtc->connector = NULL;
174 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
175
176 return 0;
177}
178
179static int dce_virtual_early_init(void *handle)
180{
181 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
182
183 dce_virtual_set_display_funcs(adev);
184 dce_virtual_set_irq_funcs(adev);
185
186 adev->mode_info.num_crtc = 1;
187 adev->mode_info.num_hpd = 1;
188 adev->mode_info.num_dig = 1;
189 return 0;
190}
191
192static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
193{
194 struct amdgpu_i2c_bus_rec ddc_bus;
195 struct amdgpu_router router;
196 struct amdgpu_hpd hpd;
197
198 /* look up gpio for ddc, hpd */
199 ddc_bus.valid = false;
200 hpd.hpd = AMDGPU_HPD_NONE;
201 /* needed for aux chan transactions */
202 ddc_bus.hpd = hpd.hpd;
203
204 memset(&router, 0, sizeof(router));
205 router.ddc_valid = false;
206 router.cd_valid = false;
207 amdgpu_display_add_connector(adev,
208 0,
209 ATOM_DEVICE_CRT1_SUPPORT,
210 DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
211 CONNECTOR_OBJECT_ID_VIRTUAL,
212 &hpd,
213 &router);
214
215 amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
216 ATOM_DEVICE_CRT1_SUPPORT,
217 0);
218
219 amdgpu_link_encoder_connector(adev->ddev);
220
221 return true;
222}
223
224static int dce_virtual_sw_init(void *handle)
225{
226 int r, i;
227 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
228
229 r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
230 if (r)
231 return r;
232
233 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
234
235 adev->ddev->mode_config.max_width = 16384;
236 adev->ddev->mode_config.max_height = 16384;
237
238 adev->ddev->mode_config.preferred_depth = 24;
239 adev->ddev->mode_config.prefer_shadow = 1;
240
241 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
242
243 r = amdgpu_modeset_create_props(adev);
244 if (r)
245 return r;
246
247 adev->ddev->mode_config.max_width = 16384;
248 adev->ddev->mode_config.max_height = 16384;
249
250 /* allocate crtcs */
251 for (i = 0; i < adev->mode_info.num_crtc; i++) {
252 r = dce_virtual_crtc_init(adev, i);
253 if (r)
254 return r;
255 }
256
257 dce_virtual_get_connector_info(adev);
258 amdgpu_print_display_setup(adev->ddev);
259
260 drm_kms_helper_poll_init(adev->ddev);
261
262 adev->mode_info.mode_config_initialized = true;
263 return 0;
264}
265
266static int dce_virtual_sw_fini(void *handle)
267{
268 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
269
270 kfree(adev->mode_info.bios_hardcoded_edid);
271
272 drm_kms_helper_poll_fini(adev->ddev);
273
274 drm_mode_config_cleanup(adev->ddev);
275 adev->mode_info.mode_config_initialized = false;
276 return 0;
277}
278
/* IP hw_init hook: no hardware to bring up. */
static int dce_virtual_hw_init(void *handle)
{
	return 0;
}
283
/* IP hw_fini hook: no hardware to tear down. */
static int dce_virtual_hw_fini(void *handle)
{
	return 0;
}
288
/* Suspend maps directly onto hw_fini for the virtual display. */
static int dce_virtual_suspend(void *handle)
{
	return dce_virtual_hw_fini(handle);
}
293
/* Resume maps directly onto hw_init for the virtual display. */
static int dce_virtual_resume(void *handle)
{
	return dce_virtual_hw_init(handle);
}
302
/* The virtual display engine is always idle. */
static bool dce_virtual_is_idle(void *handle)
{
	return true;
}
307
/* Always idle, so the wait succeeds immediately. */
static int dce_virtual_wait_for_idle(void *handle)
{
	return 0;
}
312
/* Nothing to reset on virtual hardware. */
static int dce_virtual_soft_reset(void *handle)
{
	return 0;
}
317
318static int dce_virtual_set_clockgating_state(void *handle,
319 enum amd_clockgating_state state)
320{
321 return 0;
322}
323
324static int dce_virtual_set_powergating_state(void *handle,
325 enum amd_powergating_state state)
326{
327 return 0;
328}
329
/* IP-block dispatch table for the virtual display engine, registered
 * with the amdgpu core.  Most hooks are trivial stubs because there is
 * no physical display hardware behind them. */
const struct amd_ip_funcs dce_virtual_ip_funcs = {
	.name = "dce_virtual",
	.early_init = dce_virtual_early_init,
	.late_init = NULL,
	.sw_init = dce_virtual_sw_init,
	.sw_fini = dce_virtual_sw_fini,
	.hw_init = dce_virtual_hw_init,
	.hw_fini = dce_virtual_hw_fini,
	.suspend = dce_virtual_suspend,
	.resume = dce_virtual_resume,
	.is_idle = dce_virtual_is_idle,
	.wait_for_idle = dce_virtual_wait_for_idle,
	.soft_reset = dce_virtual_soft_reset,
	.set_clockgating_state = dce_virtual_set_clockgating_state,
	.set_powergating_state = dce_virtual_set_powergating_state,
};
346
/* these are handled by the primary encoders */
static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
{
	/* intentionally empty */
}
352
/* Nothing to commit for a virtual encoder. */
static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
{
	/* intentionally empty */
}
357
/* Mode programming is a no-op: no encoder hardware exists. */
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	/* intentionally empty */
}
365
/* Nothing to disable on a virtual encoder. */
static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
{
	/* intentionally empty */
}
370
/* DPMS transitions have no effect on a virtual encoder. */
static void
dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	/* intentionally empty */
}
376
377static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
378 const struct drm_display_mode *mode,
379 struct drm_display_mode *adjusted_mode)
380{
381
382 /* set the active encoder to connector routing */
383 amdgpu_encoder_set_active_device(encoder);
384
385 return true;
386}
387
/* Encoder helper callbacks: all stubs except mode_fixup, which records
 * the encoder/connector routing. */
static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
	.dpms = dce_virtual_encoder_dpms,
	.mode_fixup = dce_virtual_encoder_mode_fixup,
	.prepare = dce_virtual_encoder_prepare,
	.mode_set = dce_virtual_encoder_mode_set,
	.commit = dce_virtual_encoder_commit,
	.disable = dce_virtual_encoder_disable,
};
396
397static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
398{
399 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
400
401 kfree(amdgpu_encoder->enc_priv);
402 drm_encoder_cleanup(encoder);
403 kfree(amdgpu_encoder);
404}
405
/* Encoder funcs: only destruction needs real work for a virtual encoder. */
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
	.destroy = dce_virtual_encoder_destroy,
};
409
410static void dce_virtual_encoder_add(struct amdgpu_device *adev,
411 uint32_t encoder_enum,
412 uint32_t supported_device,
413 u16 caps)
414{
415 struct drm_device *dev = adev->ddev;
416 struct drm_encoder *encoder;
417 struct amdgpu_encoder *amdgpu_encoder;
418
419 /* see if we already added it */
420 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
421 amdgpu_encoder = to_amdgpu_encoder(encoder);
422 if (amdgpu_encoder->encoder_enum == encoder_enum) {
423 amdgpu_encoder->devices |= supported_device;
424 return;
425 }
426
427 }
428
429 /* add a new one */
430 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
431 if (!amdgpu_encoder)
432 return;
433
434 encoder = &amdgpu_encoder->base;
435 encoder->possible_crtcs = 0x1;
436 amdgpu_encoder->enc_priv = NULL;
437 amdgpu_encoder->encoder_enum = encoder_enum;
438 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
439 amdgpu_encoder->devices = supported_device;
440 amdgpu_encoder->rmx_type = RMX_OFF;
441 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
442 amdgpu_encoder->is_ext_encoder = false;
443 amdgpu_encoder->caps = caps;
444
445 drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
446 DRM_MODE_ENCODER_VIRTUAL, NULL);
447 drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
448 DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
449}
450
/* Display asic-function table exported via mode_info.funcs.  Backlight
 * hooks are NULL (no panel); everything else points at the stubs above
 * or shared amdgpu helpers. */
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
	.set_vga_render_state = &dce_virtual_set_vga_render_state,
	.bandwidth_update = &dce_virtual_bandwidth_update,
	.vblank_get_counter = &dce_virtual_vblank_get_counter,
	.vblank_wait = &dce_virtual_vblank_wait,
	.is_display_hung = &dce_virtual_is_display_hung,
	.backlight_set_level = NULL,
	.backlight_get_level = NULL,
	.hpd_sense = &dce_virtual_hpd_sense,
	.hpd_set_polarity = &dce_virtual_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
	.page_flip = &dce_virtual_page_flip,
	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
	.add_encoder = &dce_virtual_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_virtual_stop_mc_access,
	.resume_mc_access = &dce_virtual_resume_mc_access,
};
469
470static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
471{
472 if (adev->mode_info.funcs == NULL)
473 adev->mode_info.funcs = &dce_virtual_display_funcs;
474}
475
Emily Denge13273d2016-08-08 11:31:37 +0800476static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
477 int crtc,
478 enum amdgpu_interrupt_state state)
479{
480 if (crtc >= adev->mode_info.num_crtc) {
481 DRM_DEBUG("invalid crtc %d\n", crtc);
482 return;
483 }
484}
485
486static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
487 struct amdgpu_irq_src *source,
488 unsigned type,
489 enum amdgpu_interrupt_state state)
490{
491 switch (type) {
492 case AMDGPU_CRTC_IRQ_VBLANK1:
493 dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
494 break;
495 default:
496 break;
497 }
498 return 0;
499}
500
501static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
502 int crtc)
503{
504 if (crtc >= adev->mode_info.num_crtc) {
505 DRM_DEBUG("invalid crtc %d\n", crtc);
506 return;
507 }
508}
509
510static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
511 struct amdgpu_irq_src *source,
512 struct amdgpu_iv_entry *entry)
513{
514 unsigned crtc = 0;
515 unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
516
517 adev->ddev->vblank[crtc].count++;
518 dce_virtual_crtc_vblank_int_ack(adev, crtc);
519
520 if (amdgpu_irq_enabled(adev, source, irq_type)) {
521 drm_handle_vblank(adev->ddev, crtc);
522 }
523
524 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
525 return 0;
526}
527
528static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
529 struct amdgpu_irq_src *src,
530 unsigned type,
531 enum amdgpu_interrupt_state state)
532{
533 if (type >= adev->mode_info.num_crtc) {
534 DRM_ERROR("invalid pageflip crtc %d\n", type);
535 return -EINVAL;
536 }
537 DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
538
539 return 0;
540}
541
542static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
543 struct amdgpu_irq_src *source,
544 struct amdgpu_iv_entry *entry)
545{
546 unsigned long flags;
547 unsigned crtc_id = 0;
548 struct amdgpu_crtc *amdgpu_crtc;
549 struct amdgpu_flip_work *works;
550
551 crtc_id = 0;
552 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
553
554 if (crtc_id >= adev->mode_info.num_crtc) {
555 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
556 return -EINVAL;
557 }
558
559 /* IRQ could occur when in initial stage */
560 if (amdgpu_crtc == NULL)
561 return 0;
562
563 spin_lock_irqsave(&adev->ddev->event_lock, flags);
564 works = amdgpu_crtc->pflip_works;
565 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
566 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
567 "AMDGPU_FLIP_SUBMITTED(%d)\n",
568 amdgpu_crtc->pflip_status,
569 AMDGPU_FLIP_SUBMITTED);
570 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
571 return 0;
572 }
573
574 /* page flip completed. clean up */
575 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
576 amdgpu_crtc->pflip_works = NULL;
577
578 /* wakeup usersapce */
579 if (works->event)
580 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
581
582 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
583
584 drm_crtc_vblank_put(&amdgpu_crtc->base);
585 schedule_work(&works->unpin_work);
586
587 return 0;
588}
589
/* IRQ source vtable for the virtual crtc vblank interrupt. */
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
	.set = dce_virtual_set_crtc_irq_state,
	.process = dce_virtual_crtc_irq,
};
594
/* IRQ source vtable for the virtual page-flip interrupt. */
static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
	.set = dce_virtual_set_pageflip_irq_state,
	.process = dce_virtual_pageflip_irq,
};
599
600static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
601{
602 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
603 adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
604
605 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
606 adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
Emily Dengc6e14f42016-08-08 11:30:50 +0800607}
608