/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
{
	uint32_t ret;

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
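
/*
 * Illustrative sketch (not part of the original file): ring code normally
 * goes through the RDOORBELL32()/WDOORBELL32() wrappers from amdgpu.h rather
 * than calling the helpers above directly, roughly:
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 *
 * The ring fields used here are only an assumption about typical usage; the
 * exact names and wptr handling follow the individual ring implementations.
 */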

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
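
/*
 * Illustrative sketch (not part of the original file): golden register tables
 * handed to amdgpu_program_register_sequence() are flat arrays of
 * {offset, and_mask, or_mask} triplets, so a caller might look like:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG, 0x0000ffff, 0x00000001,
 *	};
 *
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 (const u32)ARRAY_SIZE(example_golden_settings));
 *
 * mmEXAMPLE_REG is a placeholder register offset, not a real define.  An
 * and_mask of 0xffffffff skips the read-modify-write and writes or_mask
 * directly.
 */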

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
			amdgpu_bo_kunmap(adev->wb.wb_obj);
			amdgpu_bo_unpin(adev->wb.wb_obj);
			amdgpu_bo_unreserve(adev->wb.wb_obj);
		}
		amdgpu_bo_unref(&adev->wb.wb_obj);
		adev->wb.wb = NULL;
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes Writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->wb.wb_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			amdgpu_wb_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &adev->wb.gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->wb.wb_obj);
			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
			amdgpu_wb_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
		amdgpu_bo_unreserve(adev->wb.wb_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
			amdgpu_wb_fini(adev);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
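
/*
 * Illustrative sketch (not part of the original file): a typical consumer
 * allocates a writeback slot, points the engine at the corresponding GPU
 * address, reads the CPU-visible copy from adev->wb.wb[], and frees the slot:
 *
 *	u32 wb_idx;
 *	u32 value;
 *
 *	if (amdgpu_wb_get(adev, &wb_idx))
 *		return -EINVAL;
 *	... hand (adev->wb.gpu_addr + wb_idx * 4) to the engine ...
 *	value = le32_to_cpu(adev->wb.wb[wb_idx]);
 *	amdgpu_wb_free(adev, wb_idx);
 *
 * The offset math and byte-order handling above are assumptions about usage
 * for illustration; the ring/fence code is the authoritative reference.
 */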

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by bogus hw of Novell bug 204882 + along with lots of
 * ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_card_posted - check if the hw has already been initialized
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool amdgpu_card_posted(struct amdgpu_device *adev)
{
	uint32_t reg;

	/* then check MEM_SIZE, in case the crtcs are off */
	reg = RREG32(mmCONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
									    state);
			if (r)
				return r;
		}
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
									    state);
			if (r)
				return r;
		}
	}
	return r;
}

const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	const struct amdgpu_ip_block_version *ip_block;
	ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->major > major) ||
			((ip_block->major == major) &&
			(ip_block->minor >= minor))))
		return 0;

	return 1;
}
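
/*
 * Illustrative sketch (not part of the original file): callers use the
 * comparison above to gate features on a minimum IP block version, e.g.:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1) == 0) {
 *		... the GFX block is version 8.1 or newer ...
 *	}
 *
 * AMD_IP_BLOCK_TYPE_GFX and the 8.1 cut-off are only an example of usage,
 * not taken from this file.
 */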
1142
1143static int amdgpu_early_init(struct amdgpu_device *adev)
1144{
Alex Deucheraaa36a92015-04-20 17:31:14 -04001145 int i, r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001146
1147 switch (adev->asic_type) {
Alex Deucheraaa36a92015-04-20 17:31:14 -04001148 case CHIP_TOPAZ:
1149 case CHIP_TONGA:
David Zhang48299f92015-07-08 01:05:16 +08001150 case CHIP_FIJI:
Flora Cui2cc0c0b2016-03-14 18:33:29 -04001151 case CHIP_POLARIS11:
1152 case CHIP_POLARIS10:
Alex Deucheraaa36a92015-04-20 17:31:14 -04001153 case CHIP_CARRIZO:
Samuel Li39bb0c92015-10-08 16:31:43 -04001154 case CHIP_STONEY:
1155 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001156 adev->family = AMDGPU_FAMILY_CZ;
1157 else
1158 adev->family = AMDGPU_FAMILY_VI;
1159
1160 r = vi_set_ip_blocks(adev);
1161 if (r)
1162 return r;
1163 break;
Alex Deuchera2e73f52015-04-20 17:09:27 -04001164#ifdef CONFIG_DRM_AMDGPU_CIK
1165 case CHIP_BONAIRE:
1166 case CHIP_HAWAII:
1167 case CHIP_KAVERI:
1168 case CHIP_KABINI:
1169 case CHIP_MULLINS:
1170 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1171 adev->family = AMDGPU_FAMILY_CI;
1172 else
1173 adev->family = AMDGPU_FAMILY_KV;
1174
1175 r = cik_set_ip_blocks(adev);
1176 if (r)
1177 return r;
1178 break;
1179#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001180 default:
1181 /* FIXME: not supported yet */
1182 return -EINVAL;
1183 }
1184
Alex Deucher8faf0e02015-07-28 11:50:31 -04001185 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1186 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1187 if (adev->ip_block_status == NULL)
Alex Deucherd8d090b2015-06-26 13:02:57 -04001188 return -ENOMEM;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001189
1190 if (adev->ip_blocks == NULL) {
1191 DRM_ERROR("No IP blocks found!\n");
1192 return r;
1193 }
1194
1195 for (i = 0; i < adev->num_ip_blocks; i++) {
1196 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1197 DRM_ERROR("disabled ip block: %d\n", i);
Alex Deucher8faf0e02015-07-28 11:50:31 -04001198 adev->ip_block_status[i].valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001199 } else {
1200 if (adev->ip_blocks[i].funcs->early_init) {
yanyang15fc3aee2015-05-22 14:39:35 -04001201 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001202 if (r == -ENOENT) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001203 adev->ip_block_status[i].valid = false;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001204 } else if (r) {
Tom St Denis88a907d2016-05-04 14:28:35 -04001205 DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001206 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001207 } else {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001208 adev->ip_block_status[i].valid = true;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001209 }
Alex Deucher974e6b62015-07-10 13:59:44 -04001210 } else {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001211 adev->ip_block_status[i].valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001212 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001213 }
1214 }
1215
Nicolai Hähnle395d1fb2016-06-02 12:32:07 +02001216 adev->cg_flags &= amdgpu_cg_mask;
1217 adev->pg_flags &= amdgpu_pg_mask;
1218
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001219 return 0;
1220}
1221
1222static int amdgpu_init(struct amdgpu_device *adev)
1223{
1224 int i, r;
1225
1226 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001227 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001228 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001229 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001230 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001231 DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001232 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001233 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001234 adev->ip_block_status[i].sw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001235 /* need to do gmc hw init early so we can allocate gpu mem */
yanyang15fc3aee2015-05-22 14:39:35 -04001236 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001237 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001238 if (r) {
1239 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001240 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001241 }
yanyang15fc3aee2015-05-22 14:39:35 -04001242 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001243 if (r) {
1244 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001245 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001246 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001247 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001248 if (r) {
1249 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001250 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001251 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001252 adev->ip_block_status[i].hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001253 }
1254 }
1255
1256 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001257 if (!adev->ip_block_status[i].sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001258 continue;
1259 /* gmc hw init is done early */
yanyang15fc3aee2015-05-22 14:39:35 -04001260 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001261 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001262 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001263 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001264 DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001265 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001266 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001267 adev->ip_block_status[i].hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001268 }
1269
1270 return 0;
1271}
1272
1273static int amdgpu_late_init(struct amdgpu_device *adev)
1274{
1275 int i = 0, r;
1276
1277 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001278 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001279 continue;
1280 /* enable clockgating to save power */
yanyang15fc3aee2015-05-22 14:39:35 -04001281 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1282 AMD_CG_STATE_GATE);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001283 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001284 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001285 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001286 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001287 if (adev->ip_blocks[i].funcs->late_init) {
yanyang15fc3aee2015-05-22 14:39:35 -04001288 r = adev->ip_blocks[i].funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001289 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001290 DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001291 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001292 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001293 }
1294 }
1295
1296 return 0;
1297}
1298
1299static int amdgpu_fini(struct amdgpu_device *adev)
1300{
1301 int i, r;
1302
1303 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001304 if (!adev->ip_block_status[i].hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001305 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001306 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001307 amdgpu_wb_fini(adev);
1308 amdgpu_vram_scratch_fini(adev);
1309 }
1310 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
yanyang15fc3aee2015-05-22 14:39:35 -04001311 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1312 AMD_CG_STATE_UNGATE);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001313 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001314 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001315 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001316 }
yanyang15fc3aee2015-05-22 14:39:35 -04001317 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001318 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001319 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001320 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001321 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001322 adev->ip_block_status[i].hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001323 }
1324
1325 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001326 if (!adev->ip_block_status[i].sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001327 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001328 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001329 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001330 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001331 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001332 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001333 adev->ip_block_status[i].sw = false;
1334 adev->ip_block_status[i].valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001335 }
1336
Monk Liua6dcfd92016-05-19 14:36:34 +08001337 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1338 if (adev->ip_blocks[i].funcs->late_fini)
1339 adev->ip_blocks[i].funcs->late_fini((void *)adev);
1340 }
1341
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001342 return 0;
1343}
1344
1345static int amdgpu_suspend(struct amdgpu_device *adev)
1346{
1347 int i, r;
1348
Flora Cuic5a93a22016-02-26 10:45:25 +08001349 /* ungate SMC block first */
1350 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1351 AMD_CG_STATE_UNGATE);
1352 if (r) {
1353 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1354 }
1355
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001356 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001357 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001358 continue;
1359 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001360 if (i != AMD_IP_BLOCK_TYPE_SMC) {
1361 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1362 AMD_CG_STATE_UNGATE);
1363 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001364 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001365 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001366 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001367 /* XXX handle errors */
1368 r = adev->ip_blocks[i].funcs->suspend(adev);
1369 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001370 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001371 DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001372 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001373 }
1374
1375 return 0;
1376}
1377
1378static int amdgpu_resume(struct amdgpu_device *adev)
1379{
1380 int i, r;
1381
1382 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001383 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001384 continue;
1385 r = adev->ip_blocks[i].funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001386 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001387 DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001388 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001389 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001390 }
1391
1392 return 0;
1393}
1394
Andres Rodriguez048765a2016-06-11 02:51:32 -04001395static bool amdgpu_device_is_virtual(void)
1396{
1397#ifdef CONFIG_X86
1398 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1399#else
1400 return false;
1401#endif
1402}
1403
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001404/**
1405 * amdgpu_device_init - initialize the driver
1406 *
1407 * @adev: amdgpu_device pointer
1408 * @pdev: drm dev pointer
1409 * @pdev: pci dev pointer
1410 * @flags: driver flags
1411 *
1412 * Initializes the driver info and hw (all asics).
1413 * Returns 0 for success or an error on failure.
1414 * Called at driver startup.
1415 */
1416int amdgpu_device_init(struct amdgpu_device *adev,
1417 struct drm_device *ddev,
1418 struct pci_dev *pdev,
1419 uint32_t flags)
1420{
1421 int r, i;
1422 bool runtime = false;
1423
1424 adev->shutdown = false;
1425 adev->dev = &pdev->dev;
1426 adev->ddev = ddev;
1427 adev->pdev = pdev;
1428 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08001429 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001430 adev->is_atom_bios = false;
1431 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1432 adev->mc.gtt_size = 512 * 1024 * 1024;
1433 adev->accel_working = false;
1434 adev->num_rings = 0;
1435 adev->mman.buffer_funcs = NULL;
1436 adev->mman.buffer_funcs_ring = NULL;
1437 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01001438 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001439 adev->gart.gart_funcs = NULL;
1440 adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1441
1442 adev->smc_rreg = &amdgpu_invalid_rreg;
1443 adev->smc_wreg = &amdgpu_invalid_wreg;
1444 adev->pcie_rreg = &amdgpu_invalid_rreg;
1445 adev->pcie_wreg = &amdgpu_invalid_wreg;
1446 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1447 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1448 adev->didt_rreg = &amdgpu_invalid_rreg;
1449 adev->didt_wreg = &amdgpu_invalid_wreg;
1450 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1451 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1452
Alex Deucher3e39ab92015-06-05 15:04:33 -04001453 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1454 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1455 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001456
1457 /* mutex initialization are all done here so we
1458 * can recall function without having locking issues */
Christian König8d0a7ce2015-11-03 20:58:50 +01001459 mutex_init(&adev->vm_manager.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001460 atomic_set(&adev->irq.ih.lock, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001461 mutex_init(&adev->pm.mutex);
1462 mutex_init(&adev->gfx.gpu_clock_mutex);
1463 mutex_init(&adev->srbm_mutex);
1464 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001465 mutex_init(&adev->mn_lock);
1466 hash_init(adev->mn_hash);
1467
1468 amdgpu_check_arguments(adev);
1469
1470 /* Registers mapping */
1471 /* TODO: block userspace mapping of io register */
1472 spin_lock_init(&adev->mmio_idx_lock);
1473 spin_lock_init(&adev->smc_idx_lock);
1474 spin_lock_init(&adev->pcie_idx_lock);
1475 spin_lock_init(&adev->uvd_ctx_idx_lock);
1476 spin_lock_init(&adev->didt_idx_lock);
1477 spin_lock_init(&adev->audio_endpt_idx_lock);
1478
1479 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1480 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1481 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1482 if (adev->rmmio == NULL) {
1483 return -ENOMEM;
1484 }
1485 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1486 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1487
1488 /* doorbell bar mapping */
1489 amdgpu_doorbell_init(adev);
1490
1491 /* io port mapping */
1492 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1493 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1494 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1495 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1496 break;
1497 }
1498 }
1499 if (adev->rio_mem == NULL)
1500 DRM_ERROR("Unable to find PCI I/O BAR\n");
1501
1502 /* early init functions */
1503 r = amdgpu_early_init(adev);
1504 if (r)
1505 return r;
1506
1507 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1508 /* this will fail for cards that aren't VGA class devices, just
1509 * ignore it */
1510 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1511
1512 if (amdgpu_runtime_pm == 1)
1513 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04001514 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001515 runtime = true;
1516 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1517 if (runtime)
1518 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1519
1520 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04001521 if (!amdgpu_get_bios(adev)) {
1522 r = -EINVAL;
1523 goto failed;
1524 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001525 /* Must be an ATOMBIOS */
1526 if (!adev->is_atom_bios) {
1527 dev_err(adev->dev, "Expecting atombios for GPU\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001528 r = -EINVAL;
1529 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001530 }
1531 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001532 if (r) {
1533 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001534 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001535 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001536
Alex Deucher7e471e62016-02-01 11:13:04 -05001537 /* See if the asic supports SR-IOV */
1538 adev->virtualization.supports_sr_iov =
1539 amdgpu_atombios_has_gpu_virtualization_table(adev);
1540
Andres Rodriguez048765a2016-06-11 02:51:32 -04001541 /* Check if we are executing in a virtualized environment */
1542 adev->virtualization.is_virtual = amdgpu_device_is_virtual();
1543 adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
1544
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001545 /* Post card if necessary */
Andres Rodriguez048765a2016-06-11 02:51:32 -04001546 if (!amdgpu_card_posted(adev) ||
1547 (adev->virtualization.is_virtual &&
Dan Carpenter48a70e12016-06-18 11:38:44 +03001548 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001549 if (!adev->bios) {
1550 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001551 r = -EINVAL;
1552 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001553 }
1554 DRM_INFO("GPU not posted. posting now...\n");
1555 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1556 }
1557
1558 /* Initialize clocks */
1559 r = amdgpu_atombios_get_clock_info(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001560 if (r) {
1561 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001562 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001563 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001564 /* init i2c buses */
1565 amdgpu_atombios_i2c_init(adev);
1566
1567 /* Fence driver */
1568 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001569 if (r) {
1570 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001571 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001572 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001573
1574 /* init the mode config */
1575 drm_mode_config_init(adev->ddev);
1576
1577 r = amdgpu_init(adev);
1578 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05001579 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001580 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001581 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001582 }
1583
1584 adev->accel_working = true;
1585
1586 amdgpu_fbdev_init(adev);
1587
1588 r = amdgpu_ib_pool_init(adev);
1589 if (r) {
1590 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deucher83ba1262016-06-03 18:21:41 -04001591 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001592 }
1593
1594 r = amdgpu_ib_ring_tests(adev);
1595 if (r)
1596 DRM_ERROR("ib ring test failed (%d).\n", r);
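	/* an IB ring test failure is logged but is not fatal here;
	 * initialization continues with the rings that did pass */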
1597
1598 r = amdgpu_gem_debugfs_init(adev);
1599 if (r) {
1600 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1601 }
1602
1603 r = amdgpu_debugfs_regs_init(adev);
1604 if (r) {
1605 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1606 }
1607
Huang Rui50ab2532016-06-12 15:51:09 +08001608 r = amdgpu_debugfs_firmware_init(adev);
1609 if (r) {
1610 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
		goto failed;
1612 }
1613
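	/* optional self tests and benchmarks, controlled by module
	 * parameters: amdgpu_testing bit 0 runs buffer move tests, bit 1
	 * runs sync tests, amdgpu_benchmarking selects a benchmark number */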
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001614 if ((amdgpu_testing & 1)) {
1615 if (adev->accel_working)
1616 amdgpu_test_moves(adev);
1617 else
1618 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1619 }
1620 if ((amdgpu_testing & 2)) {
1621 if (adev->accel_working)
1622 amdgpu_test_syncing(adev);
1623 else
1624 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1625 }
1626 if (amdgpu_benchmarking) {
1627 if (adev->accel_working)
1628 amdgpu_benchmark(adev, amdgpu_benchmarking);
1629 else
1630 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1631 }
1632
	/* enable clockgating and similar features only after the IB tests,
	 * since some blocks require explicit gating rather than handling it
	 * automatically.
	 */
1636 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001637 if (r) {
1638 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001639 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001640 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001641
1642 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04001643
1644failed:
1645 if (runtime)
1646 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1647 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001648}
1649
1650static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
1651
1652/**
1653 * amdgpu_device_fini - tear down the driver
1654 *
1655 * @adev: amdgpu_device pointer
1656 *
1657 * Tear down the driver info (all asics).
1658 * Called at driver shutdown.
1659 */
1660void amdgpu_device_fini(struct amdgpu_device *adev)
1661{
1662 int r;
1663
1664 DRM_INFO("amdgpu: finishing device.\n");
1665 adev->shutdown = true;
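	/* teardown happens roughly in the reverse order of
	 * amdgpu_device_init() */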
1666 /* evict vram memory */
1667 amdgpu_bo_evict_vram(adev);
1668 amdgpu_ib_pool_fini(adev);
1669 amdgpu_fence_driver_fini(adev);
1670 amdgpu_fbdev_fini(adev);
1671 r = amdgpu_fini(adev);
Alex Deucher8faf0e02015-07-28 11:50:31 -04001672 kfree(adev->ip_block_status);
1673 adev->ip_block_status = NULL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001674 adev->accel_working = false;
1675 /* free i2c buses */
1676 amdgpu_i2c_fini(adev);
1677 amdgpu_atombios_fini(adev);
1678 kfree(adev->bios);
1679 adev->bios = NULL;
1680 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001681 if (adev->flags & AMD_IS_PX)
1682 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001683 vga_client_register(adev->pdev, NULL, NULL, NULL);
1684 if (adev->rio_mem)
1685 pci_iounmap(adev->pdev, adev->rio_mem);
1686 adev->rio_mem = NULL;
1687 iounmap(adev->rmmio);
1688 adev->rmmio = NULL;
1689 amdgpu_doorbell_fini(adev);
1690 amdgpu_debugfs_regs_cleanup(adev);
1691 amdgpu_debugfs_remove_files(adev);
1692}
1693
1694
1695/*
1696 * Suspend & resume.
1697 */
/**
 * amdgpu_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to also disable the PCI device and put it into D3hot
 * @fbcon: true to suspend the fbdev console
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
1708int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1709{
1710 struct amdgpu_device *adev;
1711 struct drm_crtc *crtc;
1712 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001713 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001714
1715 if (dev == NULL || dev->dev_private == NULL) {
1716 return -ENODEV;
1717 }
1718
1719 adev = dev->dev_private;
1720
1721 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1722 return 0;
1723
1724 drm_kms_helper_poll_disable(dev);
1725
1726 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001727 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001728 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1729 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1730 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001731 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001732
Alex Deucher756e6882015-10-08 00:03:36 -04001733 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001734 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04001735 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001736 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1737 struct amdgpu_bo *robj;
1738
Alex Deucher756e6882015-10-08 00:03:36 -04001739 if (amdgpu_crtc->cursor_bo) {
1740 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1741 r = amdgpu_bo_reserve(aobj, false);
1742 if (r == 0) {
1743 amdgpu_bo_unpin(aobj);
1744 amdgpu_bo_unreserve(aobj);
1745 }
1746 }
1747
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001748 if (rfb == NULL || rfb->obj == NULL) {
1749 continue;
1750 }
1751 robj = gem_to_amdgpu_bo(rfb->obj);
1752 /* don't unpin kernel fb objects */
1753 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1754 r = amdgpu_bo_reserve(robj, false);
1755 if (r == 0) {
1756 amdgpu_bo_unpin(robj);
1757 amdgpu_bo_unreserve(robj);
1758 }
1759 }
1760 }
1761 /* evict vram memory */
1762 amdgpu_bo_evict_vram(adev);
1763
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001764 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001765
1766 r = amdgpu_suspend(adev);
1767
	/* evict remaining vram memory; this second pass catches buffers
	 * (such as the GART table) that could only be unpinned once the IP
	 * blocks were suspended */
1769 amdgpu_bo_evict_vram(adev);
1770
1771 pci_save_state(dev->pdev);
1772 if (suspend) {
1773 /* Shut down the device */
1774 pci_disable_device(dev->pdev);
1775 pci_set_power_state(dev->pdev, PCI_D3hot);
1776 }
1777
1778 if (fbcon) {
1779 console_lock();
1780 amdgpu_fbdev_set_suspend(adev, 1);
1781 console_unlock();
1782 }
1783 return 0;
1784}
1785
/**
 * amdgpu_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true if the PCI device needs to be re-enabled and brought to D0
 * @fbcon: true to resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
1795int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1796{
1797 struct drm_connector *connector;
1798 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04001799 struct drm_crtc *crtc;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001800 int r;
1801
1802 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1803 return 0;
1804
1805 if (fbcon) {
1806 console_lock();
1807 }
1808 if (resume) {
1809 pci_set_power_state(dev->pdev, PCI_D0);
1810 pci_restore_state(dev->pdev);
1811 if (pci_enable_device(dev->pdev)) {
1812 if (fbcon)
1813 console_unlock();
1814 return -1;
1815 }
1816 }
1817
	/* re-post the card if the vbios-programmed state was lost while powered down */
Flora Cuica198522016-02-04 15:10:08 +08001819 if (!amdgpu_card_posted(adev))
1820 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001821
1822 r = amdgpu_resume(adev);
Flora Cuica198522016-02-04 15:10:08 +08001823 if (r)
1824 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001825
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001826 amdgpu_fence_driver_resume(adev);
1827
Flora Cuica198522016-02-04 15:10:08 +08001828 if (resume) {
1829 r = amdgpu_ib_ring_tests(adev);
1830 if (r)
1831 DRM_ERROR("ib ring test failed (%d).\n", r);
1832 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001833
1834 r = amdgpu_late_init(adev);
1835 if (r)
1836 return r;
1837
Alex Deucher756e6882015-10-08 00:03:36 -04001838 /* pin cursors */
1839 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1840 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1841
1842 if (amdgpu_crtc->cursor_bo) {
1843 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1844 r = amdgpu_bo_reserve(aobj, false);
1845 if (r == 0) {
1846 r = amdgpu_bo_pin(aobj,
1847 AMDGPU_GEM_DOMAIN_VRAM,
1848 &amdgpu_crtc->cursor_addr);
1849 if (r != 0)
1850 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1851 amdgpu_bo_unreserve(aobj);
1852 }
1853 }
1854 }
1855
	/* force the saved display modes back in */
1857 if (fbcon) {
1858 drm_helper_resume_force_mode(dev);
1859 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001860 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001861 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1862 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1863 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001864 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001865 }
1866
1867 drm_kms_helper_poll_enable(dev);
Alex Deucher54fb2a52015-11-24 14:30:56 -05001868 drm_helper_hpd_irq_event(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001869
1870 if (fbcon) {
1871 amdgpu_fbdev_set_suspend(adev, 0);
1872 console_unlock();
1873 }
1874
1875 return 0;
1876}
1877
/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
1886int amdgpu_gpu_reset(struct amdgpu_device *adev)
1887{
1888 unsigned ring_sizes[AMDGPU_MAX_RINGS];
1889 uint32_t *ring_data[AMDGPU_MAX_RINGS];
1890
1891 bool saved = false;
1892
1893 int i, r;
1894 int resched;
1895
Marek Olšákd94aed52015-05-05 21:13:49 +02001896 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001897
1898 /* block TTM */
1899 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
1900
1901 r = amdgpu_suspend(adev);
1902
1903 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1904 struct amdgpu_ring *ring = adev->rings[i];
1905 if (!ring)
1906 continue;
1907
1908 ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
1909 if (ring_sizes[i]) {
1910 saved = true;
1911 dev_info(adev->dev, "Saved %d dwords of commands "
1912 "on ring %d.\n", ring_sizes[i], i);
1913 }
1914 }
1915
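	/* attempt the reset; if the IB tests fail after the saved commands
	 * are restored, the saved state is dropped and the reset is retried
	 * once more with empty rings */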
1916retry:
1917 r = amdgpu_asic_reset(adev);
Alex Deucherbfa99262016-01-15 11:59:48 -05001918 /* post card */
1919 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1920
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001921 if (!r) {
1922 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
1923 r = amdgpu_resume(adev);
1924 }
1925
1926 if (!r) {
1927 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1928 struct amdgpu_ring *ring = adev->rings[i];
1929 if (!ring)
1930 continue;
1931
1932 amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
1933 ring_sizes[i] = 0;
1934 ring_data[i] = NULL;
1935 }
1936
1937 r = amdgpu_ib_ring_tests(adev);
1938 if (r) {
1939 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
1940 if (saved) {
1941 saved = false;
1942 r = amdgpu_suspend(adev);
1943 goto retry;
1944 }
1945 }
1946 } else {
1947 amdgpu_fence_driver_force_completion(adev);
1948 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1949 if (adev->rings[i])
1950 kfree(ring_data[i]);
1951 }
1952 }
1953
1954 drm_helper_resume_force_mode(adev->ddev);
1955
1956 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
1957 if (r) {
		/* bad news: the reset failed and there is currently no good
		 * way to report it to userspace */
1959 dev_info(adev->dev, "GPU reset failed\n");
1960 }
1961
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001962 return r;
1963}
1964
Alex Deuchercd474ba2016-02-04 10:21:23 -05001965#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
1966#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
1967
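/**
 * amdgpu_get_pcie_info - determine the PCIe speed and link width caps
 *
 * @adev: amdgpu_device pointer
 *
 * Fills adev->pm.pcie_gen_mask and adev->pm.pcie_mlw_mask, preferring the
 * amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap module parameters, then the
 * capabilities reported for the PCIe root port, and finally the default
 * masks defined above.
 */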
Alex Deucherd0dd7f02015-11-11 19:45:06 -05001968void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1969{
1970 u32 mask;
1971 int ret;
1972
Alex Deuchercd474ba2016-02-04 10:21:23 -05001973 if (amdgpu_pcie_gen_cap)
1974 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
1975
1976 if (amdgpu_pcie_lane_cap)
1977 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
1978
1979 /* covers APUs as well */
1980 if (pci_is_root_bus(adev->pdev->bus)) {
1981 if (adev->pm.pcie_gen_mask == 0)
1982 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1983 if (adev->pm.pcie_mlw_mask == 0)
1984 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05001985 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05001986 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05001987
1988 if (adev->pm.pcie_gen_mask == 0) {
1989 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1990 if (!ret) {
1991 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
1992 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1993 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
1994
1995 if (mask & DRM_PCIE_SPEED_25)
1996 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
1997 if (mask & DRM_PCIE_SPEED_50)
1998 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
1999 if (mask & DRM_PCIE_SPEED_80)
2000 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2001 } else {
2002 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2003 }
2004 }
2005 if (adev->pm.pcie_mlw_mask == 0) {
2006 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2007 if (!ret) {
2008 switch (mask) {
2009 case 32:
2010 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2011 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2012 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2013 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2014 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2015 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2016 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2017 break;
2018 case 16:
2019 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2020 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2021 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2022 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2023 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2024 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2025 break;
2026 case 12:
2027 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2028 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2029 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2030 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2031 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2032 break;
2033 case 8:
2034 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2035 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2036 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2037 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2038 break;
2039 case 4:
2040 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2041 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2042 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2043 break;
2044 case 2:
2045 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2047 break;
2048 case 1:
2049 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2050 break;
2051 default:
2052 break;
2053 }
2054 } else {
2055 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002056 }
2057 }
2058}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002059
2060/*
2061 * Debugfs
2062 */
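/**
 * amdgpu_debugfs_add_files - register a drm_info_list with debugfs
 *
 * @adev: amdgpu_device pointer
 * @files: array of drm_info_list entries to register
 * @nfiles: number of entries in @files
 *
 * Registers the entries on both the control and primary DRM minors and
 * records them so amdgpu_debugfs_remove_files() can remove them again.
 * Returns 0 on success (or if the list was already registered) and -EINVAL
 * when AMDGPU_DEBUGFS_MAX_COMPONENTS would be exceeded.
 */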
2063int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04002064 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002065 unsigned nfiles)
2066{
2067 unsigned i;
2068
2069 for (i = 0; i < adev->debugfs_count; i++) {
2070 if (adev->debugfs[i].files == files) {
2071 /* Already registered */
2072 return 0;
2073 }
2074 }
2075
2076 i = adev->debugfs_count + 1;
2077 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2078 DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Please report this so we can increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2081 return -EINVAL;
2082 }
2083 adev->debugfs[adev->debugfs_count].files = files;
2084 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2085 adev->debugfs_count = i;
2086#if defined(CONFIG_DEBUG_FS)
2087 drm_debugfs_create_files(files, nfiles,
2088 adev->ddev->control->debugfs_root,
2089 adev->ddev->control);
2090 drm_debugfs_create_files(files, nfiles,
2091 adev->ddev->primary->debugfs_root,
2092 adev->ddev->primary);
2093#endif
2094 return 0;
2095}
2096
2097static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
2098{
2099#if defined(CONFIG_DEBUG_FS)
2100 unsigned i;
2101
2102 for (i = 0; i < adev->debugfs_count; i++) {
2103 drm_debugfs_remove_files(adev->debugfs[i].files,
2104 adev->debugfs[i].num_files,
2105 adev->ddev->control);
2106 drm_debugfs_remove_files(adev->debugfs[i].files,
2107 adev->debugfs[i].num_files,
2108 adev->ddev->primary);
2109 }
2110#endif
2111}
2112
2113#if defined(CONFIG_DEBUG_FS)
2114
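/*
 * Register access via debugfs
 *
 * Each file below exposes one register space (plain MMIO, DIDT, PCIE
 * index/data, SMC).  Accesses must be a multiple of 4 bytes and 4-byte
 * aligned; the file offset divided by 4 is used as the register offset.
 */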
2115static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2116 size_t size, loff_t *pos)
2117{
2118 struct amdgpu_device *adev = f->f_inode->i_private;
2119 ssize_t result = 0;
2120 int r;
2121
2122 if (size & 0x3 || *pos & 0x3)
2123 return -EINVAL;
2124
2125 while (size) {
2126 uint32_t value;
2127
		if (*pos >= adev->rmmio_size)
2129 return result;
2130
2131 value = RREG32(*pos >> 2);
2132 r = put_user(value, (uint32_t *)buf);
2133 if (r)
2134 return r;
2135
2136 result += 4;
2137 buf += 4;
2138 *pos += 4;
2139 size -= 4;
2140 }
2141
2142 return result;
2143}
2144
2145static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2146 size_t size, loff_t *pos)
2147{
2148 struct amdgpu_device *adev = f->f_inode->i_private;
2149 ssize_t result = 0;
2150 int r;
2151
2152 if (size & 0x3 || *pos & 0x3)
2153 return -EINVAL;
2154
2155 while (size) {
2156 uint32_t value;
2157
		if (*pos >= adev->rmmio_size)
2159 return result;
2160
2161 r = get_user(value, (uint32_t *)buf);
2162 if (r)
2163 return r;
2164
2165 WREG32(*pos >> 2, value);
2166
2167 result += 4;
2168 buf += 4;
2169 *pos += 4;
2170 size -= 4;
2171 }
2172
2173 return result;
2174}
2175
Tom St Denisadcec282016-04-15 13:08:44 -04002176static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2177 size_t size, loff_t *pos)
2178{
2179 struct amdgpu_device *adev = f->f_inode->i_private;
2180 ssize_t result = 0;
2181 int r;
2182
2183 if (size & 0x3 || *pos & 0x3)
2184 return -EINVAL;
2185
2186 while (size) {
2187 uint32_t value;
2188
2189 value = RREG32_PCIE(*pos >> 2);
2190 r = put_user(value, (uint32_t *)buf);
2191 if (r)
2192 return r;
2193
2194 result += 4;
2195 buf += 4;
2196 *pos += 4;
2197 size -= 4;
2198 }
2199
2200 return result;
2201}
2202
2203static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2204 size_t size, loff_t *pos)
2205{
2206 struct amdgpu_device *adev = f->f_inode->i_private;
2207 ssize_t result = 0;
2208 int r;
2209
2210 if (size & 0x3 || *pos & 0x3)
2211 return -EINVAL;
2212
2213 while (size) {
2214 uint32_t value;
2215
2216 r = get_user(value, (uint32_t *)buf);
2217 if (r)
2218 return r;
2219
2220 WREG32_PCIE(*pos >> 2, value);
2221
2222 result += 4;
2223 buf += 4;
2224 *pos += 4;
2225 size -= 4;
2226 }
2227
2228 return result;
2229}
2230
2231static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2232 size_t size, loff_t *pos)
2233{
2234 struct amdgpu_device *adev = f->f_inode->i_private;
2235 ssize_t result = 0;
2236 int r;
2237
2238 if (size & 0x3 || *pos & 0x3)
2239 return -EINVAL;
2240
2241 while (size) {
2242 uint32_t value;
2243
2244 value = RREG32_DIDT(*pos >> 2);
2245 r = put_user(value, (uint32_t *)buf);
2246 if (r)
2247 return r;
2248
2249 result += 4;
2250 buf += 4;
2251 *pos += 4;
2252 size -= 4;
2253 }
2254
2255 return result;
2256}
2257
2258static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2259 size_t size, loff_t *pos)
2260{
2261 struct amdgpu_device *adev = f->f_inode->i_private;
2262 ssize_t result = 0;
2263 int r;
2264
2265 if (size & 0x3 || *pos & 0x3)
2266 return -EINVAL;
2267
2268 while (size) {
2269 uint32_t value;
2270
2271 r = get_user(value, (uint32_t *)buf);
2272 if (r)
2273 return r;
2274
2275 WREG32_DIDT(*pos >> 2, value);
2276
2277 result += 4;
2278 buf += 4;
2279 *pos += 4;
2280 size -= 4;
2281 }
2282
2283 return result;
2284}
2285
2286static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2287 size_t size, loff_t *pos)
2288{
2289 struct amdgpu_device *adev = f->f_inode->i_private;
2290 ssize_t result = 0;
2291 int r;
2292
2293 if (size & 0x3 || *pos & 0x3)
2294 return -EINVAL;
2295
2296 while (size) {
2297 uint32_t value;
2298
2299 value = RREG32_SMC(*pos >> 2);
2300 r = put_user(value, (uint32_t *)buf);
2301 if (r)
2302 return r;
2303
2304 result += 4;
2305 buf += 4;
2306 *pos += 4;
2307 size -= 4;
2308 }
2309
2310 return result;
2311}
2312
2313static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
2314 size_t size, loff_t *pos)
2315{
2316 struct amdgpu_device *adev = f->f_inode->i_private;
2317 ssize_t result = 0;
2318 int r;
2319
2320 if (size & 0x3 || *pos & 0x3)
2321 return -EINVAL;
2322
2323 while (size) {
2324 uint32_t value;
2325
2326 r = get_user(value, (uint32_t *)buf);
2327 if (r)
2328 return r;
2329
2330 WREG32_SMC(*pos >> 2, value);
2331
2332 result += 4;
2333 buf += 4;
2334 *pos += 4;
2335 size -= 4;
2336 }
2337
2338 return result;
2339}
2340
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002341static const struct file_operations amdgpu_debugfs_regs_fops = {
2342 .owner = THIS_MODULE,
2343 .read = amdgpu_debugfs_regs_read,
2344 .write = amdgpu_debugfs_regs_write,
2345 .llseek = default_llseek
2346};
Tom St Denisadcec282016-04-15 13:08:44 -04002347static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
2348 .owner = THIS_MODULE,
2349 .read = amdgpu_debugfs_regs_didt_read,
2350 .write = amdgpu_debugfs_regs_didt_write,
2351 .llseek = default_llseek
2352};
2353static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
2354 .owner = THIS_MODULE,
2355 .read = amdgpu_debugfs_regs_pcie_read,
2356 .write = amdgpu_debugfs_regs_pcie_write,
2357 .llseek = default_llseek
2358};
2359static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
2360 .owner = THIS_MODULE,
2361 .read = amdgpu_debugfs_regs_smc_read,
2362 .write = amdgpu_debugfs_regs_smc_write,
2363 .llseek = default_llseek
2364};
2365
2366static const struct file_operations *debugfs_regs[] = {
2367 &amdgpu_debugfs_regs_fops,
2368 &amdgpu_debugfs_regs_didt_fops,
2369 &amdgpu_debugfs_regs_pcie_fops,
2370 &amdgpu_debugfs_regs_smc_fops,
2371};
2372
2373static const char *debugfs_regs_names[] = {
2374 "amdgpu_regs",
2375 "amdgpu_regs_didt",
2376 "amdgpu_regs_pcie",
2377 "amdgpu_regs_smc",
2378};
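/*
 * These files show up under the DRM minor's debugfs directory, typically
 * /sys/kernel/debug/dri/<N>/.  As an illustrative example (path and
 * register offset chosen arbitrarily), register 0x100 could be dumped with:
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=$((0x100)) | xxd
 *
 * where skip is given in 4-byte blocks, i.e. it equals the register offset.
 */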
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002379
2380static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2381{
2382 struct drm_minor *minor = adev->ddev->primary;
2383 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04002384 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002385
Tom St Denisadcec282016-04-15 13:08:44 -04002386 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2387 ent = debugfs_create_file(debugfs_regs_names[i],
2388 S_IFREG | S_IRUGO, root,
2389 adev, debugfs_regs[i]);
2390 if (IS_ERR(ent)) {
2391 for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
2394 }
2395 return PTR_ERR(ent);
2396 }
2397
2398 if (!i)
2399 i_size_write(ent->d_inode, adev->rmmio_size);
2400 adev->debugfs_regs[i] = ent;
2401 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002402
2403 return 0;
2404}
2405
2406static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
2407{
Tom St Denisadcec282016-04-15 13:08:44 -04002408 unsigned i;
2409
2410 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2411 if (adev->debugfs_regs[i]) {
2412 debugfs_remove(adev->debugfs_regs[i]);
2413 adev->debugfs_regs[i] = NULL;
2414 }
2415 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002416}
2417
2418int amdgpu_debugfs_init(struct drm_minor *minor)
2419{
2420 return 0;
2421}
2422
2423void amdgpu_debugfs_cleanup(struct drm_minor *minor)
2424{
2425}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06002426#else
2427static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2428{
2429 return 0;
2430}
2431static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002432#endif