/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
{
	uint32_t ret;

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

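/*
 * Illustrative usage sketch (an assumption, not taken from this file): most
 * callers do not use these helpers directly but go through the RREG32()/
 * WREG32() style macros defined in amdgpu.h, which wrap them, e.g.
 *
 *   uint32_t memsize = amdgpu_mm_rreg(adev, mmCONFIG_MEMSIZE, false);
 */
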
139/**
140 * amdgpu_mm_rdoorbell - read a doorbell dword
141 *
142 * @adev: amdgpu_device pointer
143 * @index: doorbell index
144 *
145 * Returns the value in the doorbell aperture at the
146 * requested doorbell index (CIK).
147 */
148u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
149{
150 if (index < adev->doorbell.num_doorbells) {
151 return readl(adev->doorbell.ptr + index);
152 } else {
153 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
154 return 0;
155 }
156}
157
158/**
159 * amdgpu_mm_wdoorbell - write a doorbell dword
160 *
161 * @adev: amdgpu_device pointer
162 * @index: doorbell index
163 * @v: value to write
164 *
165 * Writes @v to the doorbell aperture at the
166 * requested doorbell index (CIK).
167 */
168void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
169{
170 if (index < adev->doorbell.num_doorbells) {
171 writel(v, adev->doorbell.ptr + index);
172 } else {
173 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
174 }
175}
176
177/**
178 * amdgpu_invalid_rreg - dummy reg read function
179 *
180 * @adev: amdgpu device pointer
181 * @reg: offset of register
182 *
183 * Dummy register read function. Used for register blocks
184 * that certain asics don't have (all asics).
185 * Returns the value in the register.
186 */
187static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
188{
189 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
190 BUG();
191 return 0;
192}
193
194/**
195 * amdgpu_invalid_wreg - dummy reg write function
196 *
197 * @adev: amdgpu device pointer
198 * @reg: offset of register
199 * @v: value to write to the register
200 *
201 * Dummy register read function. Used for register blocks
202 * that certain asics don't have (all asics).
203 */
204static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
205{
206 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
207 reg, v);
208 BUG();
209}
210
211/**
212 * amdgpu_block_invalid_rreg - dummy reg read function
213 *
214 * @adev: amdgpu device pointer
215 * @block: offset of instance
216 * @reg: offset of register
217 *
218 * Dummy register read function. Used for register blocks
219 * that certain asics don't have (all asics).
220 * Returns the value in the register.
221 */
222static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
223 uint32_t block, uint32_t reg)
224{
225 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
226 reg, block);
227 BUG();
228 return 0;
229}
230
231/**
232 * amdgpu_block_invalid_wreg - dummy reg write function
233 *
234 * @adev: amdgpu device pointer
235 * @block: offset of instance
236 * @reg: offset of register
237 * @v: value to write to the register
238 *
 * Dummy register write function. Used for register blocks
240 * that certain asics don't have (all asics).
241 */
242static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
243 uint32_t block,
244 uint32_t reg, uint32_t v)
245{
246 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
247 reg, block, v);
248 BUG();
249}
250
251static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
252{
253 int r;
254
255 if (adev->vram_scratch.robj == NULL) {
256 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
Alex Deucher857d9132015-08-27 00:14:16 -0400257 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
258 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
Christian König72d76682015-09-03 17:34:59 +0200259 NULL, NULL, &adev->vram_scratch.robj);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400260 if (r) {
261 return r;
262 }
263 }
264
265 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
266 if (unlikely(r != 0))
267 return r;
268 r = amdgpu_bo_pin(adev->vram_scratch.robj,
269 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
270 if (r) {
271 amdgpu_bo_unreserve(adev->vram_scratch.robj);
272 return r;
273 }
274 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
275 (void **)&adev->vram_scratch.ptr);
276 if (r)
277 amdgpu_bo_unpin(adev->vram_scratch.robj);
278 amdgpu_bo_unreserve(adev->vram_scratch.robj);
279
280 return r;
281}
282
283static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
284{
285 int r;
286
287 if (adev->vram_scratch.robj == NULL) {
288 return;
289 }
290 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
291 if (likely(r == 0)) {
292 amdgpu_bo_kunmap(adev->vram_scratch.robj);
293 amdgpu_bo_unpin(adev->vram_scratch.robj);
294 amdgpu_bo_unreserve(adev->vram_scratch.robj);
295 }
296 amdgpu_bo_unref(&adev->vram_scratch.robj);
297}
298
299/**
300 * amdgpu_program_register_sequence - program an array of registers.
301 *
302 * @adev: amdgpu_device pointer
303 * @registers: pointer to the register array
304 * @array_size: size of the register array
305 *
 * Programs an array of registers with AND and OR masks. This is a helper
 * for setting golden registers; an illustrative example follows the function.
308 */
309void amdgpu_program_register_sequence(struct amdgpu_device *adev,
310 const u32 *registers,
311 const u32 array_size)
312{
313 u32 tmp, reg, and_mask, or_mask;
314 int i;
315
316 if (array_size % 3)
317 return;
318
319 for (i = 0; i < array_size; i +=3) {
320 reg = registers[i + 0];
321 and_mask = registers[i + 1];
322 or_mask = registers[i + 2];
323
324 if (and_mask == 0xffffffff) {
325 tmp = or_mask;
326 } else {
327 tmp = RREG32(reg);
328 tmp &= ~and_mask;
329 tmp |= or_mask;
330 }
331 WREG32(reg, tmp);
332 }
333}
334
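/*
 * Illustrative sketch (not part of the original file): a "golden register"
 * table consumed by amdgpu_program_register_sequence() is a flat array of
 * {offset, AND mask, OR mask} triplets.  The register name below is
 * hypothetical and only shows the expected layout:
 *
 *   static const u32 example_golden_settings[] = {
 *           mmEXAMPLE_REG, 0xffffff0f, 0x00000010,
 *   };
 *
 *   amdgpu_program_register_sequence(adev, example_golden_settings,
 *                                    ARRAY_SIZE(example_golden_settings));
 */
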
335void amdgpu_pci_config_reset(struct amdgpu_device *adev)
336{
337 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
338}
339
340/*
 * GPU doorbell aperture helper functions.
342 */
343/**
344 * amdgpu_doorbell_init - Init doorbell driver information.
345 *
346 * @adev: amdgpu_device pointer
347 *
348 * Init doorbell driver information (CIK)
349 * Returns 0 on success, error on failure.
350 */
351static int amdgpu_doorbell_init(struct amdgpu_device *adev)
352{
353 /* doorbell bar mapping */
354 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
355 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
356
Christian Königedf600d2016-05-03 15:54:54 +0200357 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400358 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
359 if (adev->doorbell.num_doorbells == 0)
360 return -EINVAL;
361
362 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
363 if (adev->doorbell.ptr == NULL) {
364 return -ENOMEM;
365 }
366 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
367 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
368
369 return 0;
370}
371
372/**
373 * amdgpu_doorbell_fini - Tear down doorbell driver information.
374 *
375 * @adev: amdgpu_device pointer
376 *
377 * Tear down doorbell driver information (CIK)
378 */
379static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
380{
381 iounmap(adev->doorbell.ptr);
382 adev->doorbell.ptr = NULL;
383}
384
385/**
386 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
387 * setup amdkfd
388 *
389 * @adev: amdgpu_device pointer
390 * @aperture_base: output returning doorbell aperture base physical address
391 * @aperture_size: output returning doorbell aperture size in bytes
392 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
393 *
394 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
395 * takes doorbells required for its own rings and reports the setup to amdkfd.
396 * amdgpu reserved doorbells are at the start of the doorbell aperture.
397 */
398void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
399 phys_addr_t *aperture_base,
400 size_t *aperture_size,
401 size_t *start_offset)
402{
403 /*
404 * The first num_doorbells are used by amdgpu.
405 * amdkfd takes whatever's left in the aperture.
406 */
407 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
408 *aperture_base = adev->doorbell.base;
409 *aperture_size = adev->doorbell.size;
410 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
411 } else {
412 *aperture_base = 0;
413 *aperture_size = 0;
414 *start_offset = 0;
415 }
416}
417
418/*
419 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
421 * in memory with the status of certain GPU events (fences, ring pointers,
422 * etc.).
423 */
424
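/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * caller that needs a writeback slot would typically do something like
 *
 *   u32 wb;
 *
 *   if (amdgpu_wb_get(adev, &wb) == 0) {
 *           u64 wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *           u32 value = adev->wb.wb[wb];
 *           ...
 *           amdgpu_wb_free(adev, wb);
 *   }
 */
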
425/**
426 * amdgpu_wb_fini - Disable Writeback and free memory
427 *
428 * @adev: amdgpu_device pointer
429 *
430 * Disables Writeback and frees the Writeback memory (all asics).
431 * Used at driver shutdown.
432 */
433static void amdgpu_wb_fini(struct amdgpu_device *adev)
434{
435 if (adev->wb.wb_obj) {
436 if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
437 amdgpu_bo_kunmap(adev->wb.wb_obj);
438 amdgpu_bo_unpin(adev->wb.wb_obj);
439 amdgpu_bo_unreserve(adev->wb.wb_obj);
440 }
441 amdgpu_bo_unref(&adev->wb.wb_obj);
442 adev->wb.wb = NULL;
443 adev->wb.wb_obj = NULL;
444 }
445}
446
447/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
449 *
450 * @adev: amdgpu_device pointer
451 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
455 */
456static int amdgpu_wb_init(struct amdgpu_device *adev)
457{
458 int r;
459
460 if (adev->wb.wb_obj == NULL) {
461 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
Christian König72d76682015-09-03 17:34:59 +0200462 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
463 &adev->wb.wb_obj);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400464 if (r) {
465 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
466 return r;
467 }
468 r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
469 if (unlikely(r != 0)) {
470 amdgpu_wb_fini(adev);
471 return r;
472 }
473 r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
474 &adev->wb.gpu_addr);
475 if (r) {
476 amdgpu_bo_unreserve(adev->wb.wb_obj);
477 dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
478 amdgpu_wb_fini(adev);
479 return r;
480 }
481 r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
482 amdgpu_bo_unreserve(adev->wb.wb_obj);
483 if (r) {
484 dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
485 amdgpu_wb_fini(adev);
486 return r;
487 }
488
489 adev->wb.num_wb = AMDGPU_MAX_WB;
490 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
491
492 /* clear wb memory */
493 memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
494 }
495
496 return 0;
497}
498
499/**
500 * amdgpu_wb_get - Allocate a wb entry
501 *
502 * @adev: amdgpu_device pointer
503 * @wb: wb index
504 *
505 * Allocate a wb slot for use by the driver (all asics).
506 * Returns 0 on success or -EINVAL on failure.
507 */
508int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
509{
510 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
511 if (offset < adev->wb.num_wb) {
512 __set_bit(offset, adev->wb.used);
513 *wb = offset;
514 return 0;
515 } else {
516 return -EINVAL;
517 }
518}
519
520/**
521 * amdgpu_wb_free - Free a wb entry
522 *
523 * @adev: amdgpu_device pointer
524 * @wb: wb index
525 *
526 * Free a wb slot allocated for use by the driver (all asics)
527 */
528void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
529{
530 if (wb < adev->wb.num_wb)
531 __clear_bit(wb, adev->wb.used);
532}
533
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
566void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
567{
568 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
569
570 mc->vram_start = base;
571 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
572 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
573 mc->real_vram_size = mc->aper_size;
574 mc->mc_vram_size = mc->aper_size;
575 }
576 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
577 if (limit && limit < mc->real_vram_size)
578 mc->real_vram_size = limit;
579 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
580 mc->mc_vram_size >> 20, mc->vram_start,
581 mc->vram_end, mc->real_vram_size >> 20);
582}
583
/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
596void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
597{
598 u64 size_af, size_bf;
599
600 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
601 size_bf = mc->vram_start & ~mc->gtt_base_align;
602 if (size_bf > size_af) {
603 if (mc->gtt_size > size_bf) {
604 dev_warn(adev->dev, "limiting GTT\n");
605 mc->gtt_size = size_bf;
606 }
607 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
608 } else {
609 if (mc->gtt_size > size_af) {
610 dev_warn(adev->dev, "limiting GTT\n");
611 mc->gtt_size = size_af;
612 }
613 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
614 }
615 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
616 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
617 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
618}
619
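/*
 * Worked example (illustrative numbers only, not from this file): with a
 * 40-bit mc_mask, vram_start = 0 and mc_vram_size = 4 GB, vram_end is
 * 0xFFFFFFFF.  The space before VRAM (size_bf) is then 0 while the space
 * after it (size_af) is about 1 TB - 4 GB, so a 1 GB GTT would be placed
 * right after VRAM at gtt_start = 0x100000000.
 */
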
620/*
 * GPU helper functions.
622 */
623/**
624 * amdgpu_card_posted - check if the hw has already been initialized
625 *
626 * @adev: amdgpu_device pointer
627 *
628 * Check if the asic has been initialized (all asics).
629 * Used at driver startup.
630 * Returns true if initialized or false if not.
631 */
632bool amdgpu_card_posted(struct amdgpu_device *adev)
633{
634 uint32_t reg;
635
636 /* then check MEM_SIZE, in case the crtcs are off */
637 reg = RREG32(mmCONFIG_MEMSIZE);
638
639 if (reg)
640 return true;
641
642 return false;
643
644}
645
646/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400647 * amdgpu_dummy_page_init - init dummy page used by the driver
648 *
649 * @adev: amdgpu_device pointer
650 *
651 * Allocate the dummy page used by the driver (all asics).
652 * This dummy page is used by the driver as a filler for gart entries
653 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
655 */
656int amdgpu_dummy_page_init(struct amdgpu_device *adev)
657{
658 if (adev->dummy_page.page)
659 return 0;
660 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
661 if (adev->dummy_page.page == NULL)
662 return -ENOMEM;
663 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
664 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
665 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
666 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
667 __free_page(adev->dummy_page.page);
668 adev->dummy_page.page = NULL;
669 return -ENOMEM;
670 }
671 return 0;
672}
673
674/**
675 * amdgpu_dummy_page_fini - free dummy page used by the driver
676 *
677 * @adev: amdgpu_device pointer
678 *
679 * Frees the dummy page used by the driver (all asics).
680 */
681void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
682{
683 if (adev->dummy_page.page == NULL)
684 return;
685 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
686 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
687 __free_page(adev->dummy_page.page);
688 adev->dummy_page.page = NULL;
689}
690
691
692/* ATOM accessor methods */
693/*
694 * ATOM is an interpreted byte code stored in tables in the vbios. The
695 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
697 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
698 * atombios.h, and atom.c
699 */
700
701/**
702 * cail_pll_read - read PLL register
703 *
704 * @info: atom card_info pointer
705 * @reg: PLL register offset
706 *
707 * Provides a PLL register accessor for the atom interpreter (r4xx+).
708 * Returns the value of the PLL register.
709 */
710static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
711{
712 return 0;
713}
714
715/**
716 * cail_pll_write - write PLL register
717 *
718 * @info: atom card_info pointer
719 * @reg: PLL register offset
720 * @val: value to write to the pll register
721 *
722 * Provides a PLL register accessor for the atom interpreter (r4xx+).
723 */
724static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
725{
726
727}
728
729/**
730 * cail_mc_read - read MC (Memory Controller) register
731 *
732 * @info: atom card_info pointer
733 * @reg: MC register offset
734 *
735 * Provides an MC register accessor for the atom interpreter (r4xx+).
736 * Returns the value of the MC register.
737 */
738static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
739{
740 return 0;
741}
742
743/**
744 * cail_mc_write - write MC (Memory Controller) register
745 *
746 * @info: atom card_info pointer
747 * @reg: MC register offset
 * @val: value to write to the MC register
749 *
750 * Provides a MC register accessor for the atom interpreter (r4xx+).
751 */
752static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
753{
754
755}
756
757/**
758 * cail_reg_write - write MMIO register
759 *
760 * @info: atom card_info pointer
761 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
763 *
764 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
765 */
766static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
767{
768 struct amdgpu_device *adev = info->dev->dev_private;
769
770 WREG32(reg, val);
771}
772
773/**
774 * cail_reg_read - read MMIO register
775 *
776 * @info: atom card_info pointer
777 * @reg: MMIO register offset
778 *
779 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
780 * Returns the value of the MMIO register.
781 */
782static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
783{
784 struct amdgpu_device *adev = info->dev->dev_private;
785 uint32_t r;
786
787 r = RREG32(reg);
788 return r;
789}
790
791/**
792 * cail_ioreg_write - write IO register
793 *
794 * @info: atom card_info pointer
795 * @reg: IO register offset
 * @val: value to write to the IO register
797 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
799 */
800static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
801{
802 struct amdgpu_device *adev = info->dev->dev_private;
803
804 WREG32_IO(reg, val);
805}
806
807/**
808 * cail_ioreg_read - read IO register
809 *
810 * @info: atom card_info pointer
811 * @reg: IO register offset
812 *
813 * Provides an IO register accessor for the atom interpreter (r4xx+).
814 * Returns the value of the IO register.
815 */
816static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
817{
818 struct amdgpu_device *adev = info->dev->dev_private;
819 uint32_t r;
820
821 r = RREG32_IO(reg);
822 return r;
823}
824
825/**
826 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
827 *
828 * @adev: amdgpu_device pointer
829 *
830 * Frees the driver info and register access callbacks for the ATOM
831 * interpreter (r4xx+).
832 * Called at driver shutdown.
833 */
834static void amdgpu_atombios_fini(struct amdgpu_device *adev)
835{
Monk Liu89e0ec9f2016-05-27 19:34:11 +0800836 if (adev->mode_info.atom_context) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400837 kfree(adev->mode_info.atom_context->scratch);
Monk Liu89e0ec9f2016-05-27 19:34:11 +0800838 kfree(adev->mode_info.atom_context->iio);
839 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400840 kfree(adev->mode_info.atom_context);
841 adev->mode_info.atom_context = NULL;
842 kfree(adev->mode_info.atom_card_info);
843 adev->mode_info.atom_card_info = NULL;
844}
845
846/**
847 * amdgpu_atombios_init - init the driver info and callbacks for atombios
848 *
849 * @adev: amdgpu_device pointer
850 *
851 * Initializes the driver info and register access callbacks for the
852 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
854 * Called at driver startup.
855 */
856static int amdgpu_atombios_init(struct amdgpu_device *adev)
857{
858 struct card_info *atom_card_info =
859 kzalloc(sizeof(struct card_info), GFP_KERNEL);
860
861 if (!atom_card_info)
862 return -ENOMEM;
863
864 adev->mode_info.atom_card_info = atom_card_info;
865 atom_card_info->dev = adev->ddev;
866 atom_card_info->reg_read = cail_reg_read;
867 atom_card_info->reg_write = cail_reg_write;
868 /* needed for iio ops */
869 if (adev->rio_mem) {
870 atom_card_info->ioreg_read = cail_ioreg_read;
871 atom_card_info->ioreg_write = cail_ioreg_write;
872 } else {
873 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
874 atom_card_info->ioreg_read = cail_reg_read;
875 atom_card_info->ioreg_write = cail_reg_write;
876 }
877 atom_card_info->mc_read = cail_mc_read;
878 atom_card_info->mc_write = cail_mc_write;
879 atom_card_info->pll_read = cail_pll_read;
880 atom_card_info->pll_write = cail_pll_write;
881
882 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
883 if (!adev->mode_info.atom_context) {
884 amdgpu_atombios_fini(adev);
885 return -ENOMEM;
886 }
887
888 mutex_init(&adev->mode_info.atom_context->mutex);
889 amdgpu_atombios_scratch_regs_init(adev);
890 amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
891 return 0;
892}
893
894/* if we get transitioned to only one device, take VGA back */
895/**
896 * amdgpu_vga_set_decode - enable/disable vga decode
897 *
898 * @cookie: amdgpu_device pointer
899 * @state: enable/disable vga decode
900 *
901 * Enable/disable vga decode (all asics).
902 * Returns VGA resource flags.
903 */
904static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
905{
906 struct amdgpu_device *adev = cookie;
907 amdgpu_asic_set_vga_state(adev, state);
908 if (state)
909 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
910 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
911 else
912 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
913}
914
915/**
916 * amdgpu_check_pot_argument - check that argument is a power of two
917 *
918 * @arg: value to check
919 *
920 * Validates that a certain argument is a power of two (all asics).
921 * Returns true if argument is valid.
922 */
923static bool amdgpu_check_pot_argument(int arg)
924{
925 return (arg & (arg - 1)) == 0;
926}
927
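/*
 * For example (illustrative): 8 is 0b1000 and 8 & 7 == 0, so it passes the
 * check above, while 12 is 0b1100 and 12 & 11 == 0b1000 != 0, so it does not.
 */
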
928/**
929 * amdgpu_check_arguments - validate module params
930 *
931 * @adev: amdgpu_device pointer
932 *
933 * Validates certain module parameters and updates
934 * the associated values used by the driver (all asics).
935 */
936static void amdgpu_check_arguments(struct amdgpu_device *adev)
937{
Chunming Zhou5b011232015-12-10 17:34:33 +0800938 if (amdgpu_sched_jobs < 4) {
939 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
940 amdgpu_sched_jobs);
941 amdgpu_sched_jobs = 4;
942 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
943 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
944 amdgpu_sched_jobs);
945 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
946 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400947
948 if (amdgpu_gart_size != -1) {
Christian Königc4e1a132016-03-17 16:25:15 +0100949 /* gtt size must be greater or equal to 32M */
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400950 if (amdgpu_gart_size < 32) {
951 dev_warn(adev->dev, "gart size (%d) too small\n",
952 amdgpu_gart_size);
953 amdgpu_gart_size = -1;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400954 }
955 }
956
957 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
958 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
959 amdgpu_vm_size);
Alex Deucher8dacc122015-05-11 16:20:58 -0400960 amdgpu_vm_size = 8;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400961 }
962
963 if (amdgpu_vm_size < 1) {
964 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
965 amdgpu_vm_size);
Alex Deucher8dacc122015-05-11 16:20:58 -0400966 amdgpu_vm_size = 8;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400967 }
968
969 /*
970 * Max GPUVM size for Cayman, SI and CI are 40 bits.
971 */
972 if (amdgpu_vm_size > 1024) {
973 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
974 amdgpu_vm_size);
Alex Deucher8dacc122015-05-11 16:20:58 -0400975 amdgpu_vm_size = 8;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400976 }
977
978 /* defines number of bits in page table versus page directory,
979 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
980 * page table and the remaining bits are in the page directory */
981 if (amdgpu_vm_block_size == -1) {
982
983 /* Total bits covered by PD + PTs */
984 unsigned bits = ilog2(amdgpu_vm_size) + 18;
985
986 /* Make sure the PD is 4K in size up to 8GB address space.
987 Above that split equal between PD and PTs */
988 if (amdgpu_vm_size <= 8)
989 amdgpu_vm_block_size = bits - 9;
990 else
991 amdgpu_vm_block_size = (bits + 3) / 2;
992
993 } else if (amdgpu_vm_block_size < 9) {
994 dev_warn(adev->dev, "VM page table size (%d) too small\n",
995 amdgpu_vm_block_size);
996 amdgpu_vm_block_size = 9;
997 }
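	/*
	 * Worked example (illustrative): with amdgpu_vm_size = 8 (GB) the
	 * address space covers 8 GB / 4 KB = 2^21 pages, i.e. bits =
	 * ilog2(8) + 18 = 21.  Since 8 <= 8 the page directory is kept at
	 * 4 KB (9 bits), leaving amdgpu_vm_block_size = 21 - 9 = 12 bits
	 * per page table.
	 */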
998
999 if (amdgpu_vm_block_size > 24 ||
1000 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1001 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1002 amdgpu_vm_block_size);
1003 amdgpu_vm_block_size = 9;
1004 }
1005}
1006
1007/**
1008 * amdgpu_switcheroo_set_state - set switcheroo state
1009 *
1010 * @pdev: pci dev pointer
Lukas Wunner16944672015-09-05 11:17:35 +02001011 * @state: vga_switcheroo state
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001012 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
1015 */
1016static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1017{
1018 struct drm_device *dev = pci_get_drvdata(pdev);
1019
1020 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1021 return;
1022
1023 if (state == VGA_SWITCHEROO_ON) {
1024 unsigned d3_delay = dev->pdev->d3_delay;
1025
1026 printk(KERN_INFO "amdgpu: switched on\n");
1027 /* don't suspend or resume card normally */
1028 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1029
Alex Deucher810ddc32016-08-23 13:25:49 -04001030 amdgpu_device_resume(dev, true, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001031
1032 dev->pdev->d3_delay = d3_delay;
1033
1034 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1035 drm_kms_helper_poll_enable(dev);
1036 } else {
1037 printk(KERN_INFO "amdgpu: switched off\n");
1038 drm_kms_helper_poll_disable(dev);
1039 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
Alex Deucher810ddc32016-08-23 13:25:49 -04001040 amdgpu_device_suspend(dev, true, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001041 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1042 }
1043}
1044
1045/**
1046 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1047 *
1048 * @pdev: pci dev pointer
1049 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
1052 * Returns true if the state can be changed, false if not.
1053 */
1054static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1055{
1056 struct drm_device *dev = pci_get_drvdata(pdev);
1057
1058 /*
1059 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1060 * locking inversion with the driver load path. And the access here is
1061 * completely racy anyway. So don't bother with locking for now.
1062 */
1063 return dev->open_count == 0;
1064}
1065
1066static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1067 .set_gpu_state = amdgpu_switcheroo_set_state,
1068 .reprobe = NULL,
1069 .can_switch = amdgpu_switcheroo_can_switch,
1070};
1071
1072int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001073 enum amd_ip_block_type block_type,
1074 enum amd_clockgating_state state)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001075{
1076 int i, r = 0;
1077
1078 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001079 if (!adev->ip_block_status[i].valid)
1080 continue;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001081 if (adev->ip_blocks[i].type == block_type) {
yanyang15fc3aee2015-05-22 14:39:35 -04001082 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001083 state);
1084 if (r)
1085 return r;
Alex Deuchera225bf12016-06-23 11:48:30 -04001086 break;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001087 }
1088 }
1089 return r;
1090}
1091
1092int amdgpu_set_powergating_state(struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001093 enum amd_ip_block_type block_type,
1094 enum amd_powergating_state state)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001095{
1096 int i, r = 0;
1097
1098 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001099 if (!adev->ip_block_status[i].valid)
1100 continue;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001101 if (adev->ip_blocks[i].type == block_type) {
yanyang15fc3aee2015-05-22 14:39:35 -04001102 r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001103 state);
1104 if (r)
1105 return r;
Alex Deuchera225bf12016-06-23 11:48:30 -04001106 break;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001107 }
1108 }
1109 return r;
1110}
1111
Alex Deucher5dbbb602016-06-23 11:41:04 -04001112int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1113 enum amd_ip_block_type block_type)
1114{
1115 int i, r;
1116
1117 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001118 if (!adev->ip_block_status[i].valid)
1119 continue;
Alex Deucher5dbbb602016-06-23 11:41:04 -04001120 if (adev->ip_blocks[i].type == block_type) {
1121 r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
1122 if (r)
1123 return r;
1124 break;
1125 }
1126 }
1127 return 0;
1128
1129}
1130
1131bool amdgpu_is_idle(struct amdgpu_device *adev,
1132 enum amd_ip_block_type block_type)
1133{
1134 int i;
1135
1136 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001137 if (!adev->ip_block_status[i].valid)
1138 continue;
Alex Deucher5dbbb602016-06-23 11:41:04 -04001139 if (adev->ip_blocks[i].type == block_type)
1140 return adev->ip_blocks[i].funcs->is_idle((void *)adev);
1141 }
1142 return true;
1143
1144}
1145
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001146const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
1147 struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001148 enum amd_ip_block_type type)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001149{
1150 int i;
1151
1152 for (i = 0; i < adev->num_ip_blocks; i++)
1153 if (adev->ip_blocks[i].type == type)
1154 return &adev->ip_blocks[i];
1155
1156 return NULL;
1157}
1158
1159/**
1160 * amdgpu_ip_block_version_cmp
1161 *
1162 * @adev: amdgpu_device pointer
yanyang15fc3aee2015-05-22 14:39:35 -04001163 * @type: enum amd_ip_block_type
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001164 * @major: major version
1165 * @minor: minor version
1166 *
1167 * return 0 if equal or greater
1168 * return 1 if smaller or the ip_block doesn't exist
1169 */
1170int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001171 enum amd_ip_block_type type,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001172 u32 major, u32 minor)
1173{
1174 const struct amdgpu_ip_block_version *ip_block;
1175 ip_block = amdgpu_get_ip_block(adev, type);
1176
1177 if (ip_block && ((ip_block->major > major) ||
1178 ((ip_block->major == major) &&
1179 (ip_block->minor >= minor))))
1180 return 0;
1181
1182 return 1;
1183}
1184
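/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 *
 *   if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC, 8, 0) == 0) {
 *           // GMC IP block is present at version 8.0 or newer
 *   }
 */
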
Emily Deng9accf2f2016-08-10 16:01:25 +08001185static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
1186{
1187 adev->enable_virtual_display = false;
1188
1189 if (amdgpu_virtual_display) {
1190 struct drm_device *ddev = adev->ddev;
1191 const char *pci_address_name = pci_name(ddev->pdev);
1192 char *pciaddstr, *pciaddstr_tmp, *pciaddname;
1193
1194 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1195 pciaddstr_tmp = pciaddstr;
1196 while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
1197 if (!strcmp(pci_address_name, pciaddname)) {
1198 adev->enable_virtual_display = true;
1199 break;
1200 }
1201 }
1202
1203 DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
1204 amdgpu_virtual_display, pci_address_name,
1205 adev->enable_virtual_display);
1206
1207 kfree(pciaddstr);
1208 }
1209}
1210
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001211static int amdgpu_early_init(struct amdgpu_device *adev)
1212{
Alex Deucheraaa36a92015-04-20 17:31:14 -04001213 int i, r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001214
Emily Deng9accf2f2016-08-10 16:01:25 +08001215 amdgpu_whether_enable_virtual_display(adev);
Emily Denga6be7572016-08-08 11:37:50 +08001216
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001217 switch (adev->asic_type) {
Alex Deucheraaa36a92015-04-20 17:31:14 -04001218 case CHIP_TOPAZ:
1219 case CHIP_TONGA:
David Zhang48299f92015-07-08 01:05:16 +08001220 case CHIP_FIJI:
Flora Cui2cc0c0b2016-03-14 18:33:29 -04001221 case CHIP_POLARIS11:
1222 case CHIP_POLARIS10:
Alex Deucheraaa36a92015-04-20 17:31:14 -04001223 case CHIP_CARRIZO:
Samuel Li39bb0c92015-10-08 16:31:43 -04001224 case CHIP_STONEY:
1225 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001226 adev->family = AMDGPU_FAMILY_CZ;
1227 else
1228 adev->family = AMDGPU_FAMILY_VI;
1229
1230 r = vi_set_ip_blocks(adev);
1231 if (r)
1232 return r;
1233 break;
Alex Deuchera2e73f52015-04-20 17:09:27 -04001234#ifdef CONFIG_DRM_AMDGPU_CIK
1235 case CHIP_BONAIRE:
1236 case CHIP_HAWAII:
1237 case CHIP_KAVERI:
1238 case CHIP_KABINI:
1239 case CHIP_MULLINS:
1240 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1241 adev->family = AMDGPU_FAMILY_CI;
1242 else
1243 adev->family = AMDGPU_FAMILY_KV;
1244
1245 r = cik_set_ip_blocks(adev);
1246 if (r)
1247 return r;
1248 break;
1249#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001250 default:
1251 /* FIXME: not supported yet */
1252 return -EINVAL;
1253 }
1254
Alex Deucher8faf0e02015-07-28 11:50:31 -04001255 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1256 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1257 if (adev->ip_block_status == NULL)
Alex Deucherd8d090b2015-06-26 13:02:57 -04001258 return -ENOMEM;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001259
1260 if (adev->ip_blocks == NULL) {
1261 DRM_ERROR("No IP blocks found!\n");
1262 return r;
1263 }
1264
1265 for (i = 0; i < adev->num_ip_blocks; i++) {
1266 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1267 DRM_ERROR("disabled ip block: %d\n", i);
Alex Deucher8faf0e02015-07-28 11:50:31 -04001268 adev->ip_block_status[i].valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001269 } else {
1270 if (adev->ip_blocks[i].funcs->early_init) {
yanyang15fc3aee2015-05-22 14:39:35 -04001271 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001272 if (r == -ENOENT) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001273 adev->ip_block_status[i].valid = false;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001274 } else if (r) {
Tom St Denis88a907d2016-05-04 14:28:35 -04001275 DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001276 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001277 } else {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001278 adev->ip_block_status[i].valid = true;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001279 }
Alex Deucher974e6b62015-07-10 13:59:44 -04001280 } else {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001281 adev->ip_block_status[i].valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001282 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001283 }
1284 }
1285
Nicolai Hähnle395d1fb2016-06-02 12:32:07 +02001286 adev->cg_flags &= amdgpu_cg_mask;
1287 adev->pg_flags &= amdgpu_pg_mask;
1288
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001289 return 0;
1290}
1291
1292static int amdgpu_init(struct amdgpu_device *adev)
1293{
1294 int i, r;
1295
1296 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001297 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001298 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001299 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001300 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001301 DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001302 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001303 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001304 adev->ip_block_status[i].sw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001305 /* need to do gmc hw init early so we can allocate gpu mem */
yanyang15fc3aee2015-05-22 14:39:35 -04001306 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001307 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001308 if (r) {
1309 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001310 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001311 }
yanyang15fc3aee2015-05-22 14:39:35 -04001312 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001313 if (r) {
1314 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001315 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001316 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001317 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001318 if (r) {
1319 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001320 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001321 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001322 adev->ip_block_status[i].hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001323 }
1324 }
1325
1326 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001327 if (!adev->ip_block_status[i].sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001328 continue;
1329 /* gmc hw init is done early */
yanyang15fc3aee2015-05-22 14:39:35 -04001330 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001331 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001332 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001333 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001334 DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001335 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001336 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001337 adev->ip_block_status[i].hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001338 }
1339
1340 return 0;
1341}
1342
1343static int amdgpu_late_init(struct amdgpu_device *adev)
1344{
1345 int i = 0, r;
1346
1347 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001348 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001349 continue;
1350 /* enable clockgating to save power */
yanyang15fc3aee2015-05-22 14:39:35 -04001351 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1352 AMD_CG_STATE_GATE);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001353 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001354 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001355 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001356 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001357 if (adev->ip_blocks[i].funcs->late_init) {
yanyang15fc3aee2015-05-22 14:39:35 -04001358 r = adev->ip_blocks[i].funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001359 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001360 DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001361 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001362 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001363 }
1364 }
1365
1366 return 0;
1367}
1368
1369static int amdgpu_fini(struct amdgpu_device *adev)
1370{
1371 int i, r;
1372
1373 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001374 if (!adev->ip_block_status[i].hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001375 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001376 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001377 amdgpu_wb_fini(adev);
1378 amdgpu_vram_scratch_fini(adev);
1379 }
1380 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
yanyang15fc3aee2015-05-22 14:39:35 -04001381 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1382 AMD_CG_STATE_UNGATE);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001383 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001384 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001385 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001386 }
yanyang15fc3aee2015-05-22 14:39:35 -04001387 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001388 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001389 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001390 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001391 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001392 adev->ip_block_status[i].hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001393 }
1394
1395 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001396 if (!adev->ip_block_status[i].sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001397 continue;
yanyang15fc3aee2015-05-22 14:39:35 -04001398 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001399 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001400 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001401 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001402 }
Alex Deucher8faf0e02015-07-28 11:50:31 -04001403 adev->ip_block_status[i].sw = false;
1404 adev->ip_block_status[i].valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001405 }
1406
Monk Liua6dcfd92016-05-19 14:36:34 +08001407 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1408 if (adev->ip_blocks[i].funcs->late_fini)
1409 adev->ip_blocks[i].funcs->late_fini((void *)adev);
1410 }
1411
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001412 return 0;
1413}
1414
1415static int amdgpu_suspend(struct amdgpu_device *adev)
1416{
1417 int i, r;
1418
Flora Cuic5a93a22016-02-26 10:45:25 +08001419 /* ungate SMC block first */
1420 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1421 AMD_CG_STATE_UNGATE);
1422 if (r) {
1423 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1424 }
1425
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001426 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001427 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001428 continue;
1429 /* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_SMC) {
1431 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1432 AMD_CG_STATE_UNGATE);
1433 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001434 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001435 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001436 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001437 /* XXX handle errors */
1438 r = adev->ip_blocks[i].funcs->suspend(adev);
1439 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001440 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001441 DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001442 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001443 }
1444
1445 return 0;
1446}
1447
1448static int amdgpu_resume(struct amdgpu_device *adev)
1449{
1450 int i, r;
1451
1452 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deucher8faf0e02015-07-28 11:50:31 -04001453 if (!adev->ip_block_status[i].valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001454 continue;
1455 r = adev->ip_blocks[i].funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001456 if (r) {
Tom St Denis822b2ce2016-05-05 10:23:40 -04001457 DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001458 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001459 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001460 }
1461
1462 return 0;
1463}
1464
Andres Rodriguez048765a2016-06-11 02:51:32 -04001465static bool amdgpu_device_is_virtual(void)
1466{
1467#ifdef CONFIG_X86
1468 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1469#else
1470 return false;
1471#endif
1472}
1473
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001474/**
1475 * amdgpu_device_init - initialize the driver
1476 *
1477 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
1479 * @pdev: pci dev pointer
1480 * @flags: driver flags
1481 *
1482 * Initializes the driver info and hw (all asics).
1483 * Returns 0 for success or an error on failure.
1484 * Called at driver startup.
1485 */
1486int amdgpu_device_init(struct amdgpu_device *adev,
1487 struct drm_device *ddev,
1488 struct pci_dev *pdev,
1489 uint32_t flags)
1490{
1491 int r, i;
1492 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02001493 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001494
1495 adev->shutdown = false;
1496 adev->dev = &pdev->dev;
1497 adev->ddev = ddev;
1498 adev->pdev = pdev;
1499 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08001500 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001501 adev->is_atom_bios = false;
1502 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1503 adev->mc.gtt_size = 512 * 1024 * 1024;
1504 adev->accel_working = false;
1505 adev->num_rings = 0;
1506 adev->mman.buffer_funcs = NULL;
1507 adev->mman.buffer_funcs_ring = NULL;
1508 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01001509 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001510 adev->gart.gart_funcs = NULL;
1511 adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1512
1513 adev->smc_rreg = &amdgpu_invalid_rreg;
1514 adev->smc_wreg = &amdgpu_invalid_wreg;
1515 adev->pcie_rreg = &amdgpu_invalid_rreg;
1516 adev->pcie_wreg = &amdgpu_invalid_wreg;
1517 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1518 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1519 adev->didt_rreg = &amdgpu_invalid_rreg;
1520 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08001521 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1522 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001523 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1524 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1525
Rex Zhuccdbb202016-06-08 12:47:41 +08001526
Alex Deucher3e39ab92015-06-05 15:04:33 -04001527 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1528 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1529 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001530
1531 /* mutex initialization are all done here so we
1532 * can recall function without having locking issues */
Christian König8d0a7ce2015-11-03 20:58:50 +01001533 mutex_init(&adev->vm_manager.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001534 atomic_set(&adev->irq.ih.lock, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001535 mutex_init(&adev->pm.mutex);
1536 mutex_init(&adev->gfx.gpu_clock_mutex);
1537 mutex_init(&adev->srbm_mutex);
1538 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001539 mutex_init(&adev->mn_lock);
1540 hash_init(adev->mn_hash);
1541
1542 amdgpu_check_arguments(adev);
1543
1544 /* Registers mapping */
1545 /* TODO: block userspace mapping of io register */
1546 spin_lock_init(&adev->mmio_idx_lock);
1547 spin_lock_init(&adev->smc_idx_lock);
1548 spin_lock_init(&adev->pcie_idx_lock);
1549 spin_lock_init(&adev->uvd_ctx_idx_lock);
1550 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08001551 spin_lock_init(&adev->gc_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001552 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02001553 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001554
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08001555 INIT_LIST_HEAD(&adev->shadow_list);
1556 mutex_init(&adev->shadow_list_lock);
1557
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001558 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1559 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1560 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1561 if (adev->rmmio == NULL) {
1562 return -ENOMEM;
1563 }
1564 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1565 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1566
1567 /* doorbell bar mapping */
1568 amdgpu_doorbell_init(adev);
1569
1570 /* io port mapping */
1571 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1572 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1573 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1574 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1575 break;
1576 }
1577 }
1578 if (adev->rio_mem == NULL)
1579 DRM_ERROR("Unable to find PCI I/O BAR\n");
1580
1581 /* early init functions */
1582 r = amdgpu_early_init(adev);
1583 if (r)
1584 return r;
1585
1586	/* if we have more than one VGA card, disable the amdgpu VGA resources */
1587 /* this will fail for cards that aren't VGA class devices, just
1588 * ignore it */
1589 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1590
1591 if (amdgpu_runtime_pm == 1)
1592 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04001593 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001594 runtime = true;
1595 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1596 if (runtime)
1597 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1598
1599 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04001600 if (!amdgpu_get_bios(adev)) {
1601 r = -EINVAL;
1602 goto failed;
1603 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001604 /* Must be an ATOMBIOS */
1605 if (!adev->is_atom_bios) {
1606 dev_err(adev->dev, "Expecting atombios for GPU\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001607 r = -EINVAL;
1608 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001609 }
1610 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001611 if (r) {
1612 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001613 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001614 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001615
Alex Deucher7e471e62016-02-01 11:13:04 -05001616 /* See if the asic supports SR-IOV */
1617 adev->virtualization.supports_sr_iov =
1618 amdgpu_atombios_has_gpu_virtualization_table(adev);
1619
Andres Rodriguez048765a2016-06-11 02:51:32 -04001620 /* Check if we are executing in a virtualized environment */
1621 adev->virtualization.is_virtual = amdgpu_device_is_virtual();
1622 adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
1623
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001624 /* Post card if necessary */
Andres Rodriguez048765a2016-06-11 02:51:32 -04001625 if (!amdgpu_card_posted(adev) ||
1626 (adev->virtualization.is_virtual &&
Dan Carpenter48a70e12016-06-18 11:38:44 +03001627 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001628 if (!adev->bios) {
1629 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001630 r = -EINVAL;
1631 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001632 }
1633 DRM_INFO("GPU not posted. posting now...\n");
1634 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1635 }
1636
1637 /* Initialize clocks */
1638 r = amdgpu_atombios_get_clock_info(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001639 if (r) {
1640 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001641 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001642 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001643 /* init i2c buses */
1644 amdgpu_atombios_i2c_init(adev);
1645
1646 /* Fence driver */
1647 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001648 if (r) {
1649 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001650 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001651 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001652
1653 /* init the mode config */
1654 drm_mode_config_init(adev->ddev);
1655
1656 r = amdgpu_init(adev);
1657 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05001658 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001659 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001660 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001661 }
1662
1663 adev->accel_working = true;
1664
Marek Olšák95844d22016-08-17 23:49:27 +02001665 /* Initialize the buffer migration limit. */
1666 if (amdgpu_moverate >= 0)
1667 max_MBps = amdgpu_moverate;
1668 else
1669 max_MBps = 8; /* Allow 8 MB/s. */
1670 /* Get a log2 for easy divisions. */
1671 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1672
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001673 amdgpu_fbdev_init(adev);
1674
1675 r = amdgpu_ib_pool_init(adev);
1676 if (r) {
1677 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deucher83ba1262016-06-03 18:21:41 -04001678 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001679 }
1680
1681 r = amdgpu_ib_ring_tests(adev);
1682 if (r)
1683 DRM_ERROR("ib ring test failed (%d).\n", r);
1684
1685 r = amdgpu_gem_debugfs_init(adev);
1686 if (r) {
1687 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1688 }
1689
1690 r = amdgpu_debugfs_regs_init(adev);
1691 if (r) {
1692 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1693 }
1694
Huang Rui50ab2532016-06-12 15:51:09 +08001695 r = amdgpu_debugfs_firmware_init(adev);
1696 if (r) {
1697 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1698 return r;
1699 }
1700
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001701 if ((amdgpu_testing & 1)) {
1702 if (adev->accel_working)
1703 amdgpu_test_moves(adev);
1704 else
1705 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1706 }
1707 if ((amdgpu_testing & 2)) {
1708 if (adev->accel_working)
1709 amdgpu_test_syncing(adev);
1710 else
1711 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1712 }
1713 if (amdgpu_benchmarking) {
1714 if (adev->accel_working)
1715 amdgpu_benchmark(adev, amdgpu_benchmarking);
1716 else
1717 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1718 }
1719
1720 /* enable clockgating, etc. after ib tests, etc. since some blocks require
1721 * explicit gating rather than handling it automatically.
1722 */
1723 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001724 if (r) {
1725 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001726 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001727 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001728
1729 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04001730
1731failed:
1732 if (runtime)
1733 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1734 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001735}
1736
1737static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
1738
1739/**
1740 * amdgpu_device_fini - tear down the driver
1741 *
1742 * @adev: amdgpu_device pointer
1743 *
1744 * Tear down the driver info (all asics).
1745 * Called at driver shutdown.
1746 */
1747void amdgpu_device_fini(struct amdgpu_device *adev)
1748{
1749 int r;
1750
1751 DRM_INFO("amdgpu: finishing device.\n");
1752 adev->shutdown = true;
1753 /* evict vram memory */
1754 amdgpu_bo_evict_vram(adev);
1755 amdgpu_ib_pool_fini(adev);
1756 amdgpu_fence_driver_fini(adev);
Lukas Wunner84b89bd2016-06-08 18:47:27 +02001757 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001758 amdgpu_fbdev_fini(adev);
1759 r = amdgpu_fini(adev);
Alex Deucher8faf0e02015-07-28 11:50:31 -04001760 kfree(adev->ip_block_status);
1761 adev->ip_block_status = NULL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001762 adev->accel_working = false;
1763 /* free i2c buses */
1764 amdgpu_i2c_fini(adev);
1765 amdgpu_atombios_fini(adev);
1766 kfree(adev->bios);
1767 adev->bios = NULL;
1768 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001769 if (adev->flags & AMD_IS_PX)
1770 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001771 vga_client_register(adev->pdev, NULL, NULL, NULL);
1772 if (adev->rio_mem)
1773 pci_iounmap(adev->pdev, adev->rio_mem);
1774 adev->rio_mem = NULL;
1775 iounmap(adev->rmmio);
1776 adev->rmmio = NULL;
1777 amdgpu_doorbell_fini(adev);
1778 amdgpu_debugfs_regs_cleanup(adev);
1779 amdgpu_debugfs_remove_files(adev);
1780}
1781
1782
1783/*
1784 * Suspend & resume.
1785 */
1786/**
Alex Deucher810ddc32016-08-23 13:25:49 -04001787 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001788 *
1789 * @dev: drm dev pointer
1790 * @suspend: put the device into a low power (D3hot) state
 * @fbcon: suspend the fbdev console as well
1791 *
1792 * Puts the hw in the suspend state (all asics).
1793 * Returns 0 for success or an error on failure.
1794 * Called at driver suspend.
1795 */
Alex Deucher810ddc32016-08-23 13:25:49 -04001796int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001797{
1798 struct amdgpu_device *adev;
1799 struct drm_crtc *crtc;
1800 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001801 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001802
1803 if (dev == NULL || dev->dev_private == NULL) {
1804 return -ENODEV;
1805 }
1806
1807 adev = dev->dev_private;
1808
1809 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1810 return 0;
1811
1812 drm_kms_helper_poll_disable(dev);
1813
1814 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001815 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001816 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1817 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1818 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001819 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001820
Alex Deucher756e6882015-10-08 00:03:36 -04001821 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001822 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04001823 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001824 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1825 struct amdgpu_bo *robj;
1826
Alex Deucher756e6882015-10-08 00:03:36 -04001827 if (amdgpu_crtc->cursor_bo) {
1828 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1829 r = amdgpu_bo_reserve(aobj, false);
1830 if (r == 0) {
1831 amdgpu_bo_unpin(aobj);
1832 amdgpu_bo_unreserve(aobj);
1833 }
1834 }
1835
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001836 if (rfb == NULL || rfb->obj == NULL) {
1837 continue;
1838 }
1839 robj = gem_to_amdgpu_bo(rfb->obj);
1840 /* don't unpin kernel fb objects */
1841 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1842 r = amdgpu_bo_reserve(robj, false);
1843 if (r == 0) {
1844 amdgpu_bo_unpin(robj);
1845 amdgpu_bo_unreserve(robj);
1846 }
1847 }
1848 }
1849 /* evict vram memory */
1850 amdgpu_bo_evict_vram(adev);
1851
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001852 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001853
1854 r = amdgpu_suspend(adev);
1855
1856 /* evict remaining vram memory */
1857 amdgpu_bo_evict_vram(adev);
1858
1859 pci_save_state(dev->pdev);
1860 if (suspend) {
1861 /* Shut down the device */
1862 pci_disable_device(dev->pdev);
1863 pci_set_power_state(dev->pdev, PCI_D3hot);
1864 }
1865
1866 if (fbcon) {
1867 console_lock();
1868 amdgpu_fbdev_set_suspend(adev, 1);
1869 console_unlock();
1870 }
1871 return 0;
1872}
1873
1874/**
Alex Deucher810ddc32016-08-23 13:25:49 -04001875 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001876 *
1877 * @dev: drm dev pointer
 * @resume: re-enable and power up the PCI device
 * @fbcon: resume the fbdev console as well
1878 *
1879 * Bring the hw back to operating state (all asics).
1880 * Returns 0 for success or an error on failure.
1881 * Called at driver resume.
1882 */
Alex Deucher810ddc32016-08-23 13:25:49 -04001883int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001884{
1885 struct drm_connector *connector;
1886 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04001887 struct drm_crtc *crtc;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001888 int r;
1889
1890 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1891 return 0;
1892
1893 if (fbcon) {
1894 console_lock();
1895 }
1896 if (resume) {
1897 pci_set_power_state(dev->pdev, PCI_D0);
1898 pci_restore_state(dev->pdev);
1899 if (pci_enable_device(dev->pdev)) {
1900 if (fbcon)
1901 console_unlock();
1902 return -1;
1903 }
1904 }
1905
1906 /* post card */
Flora Cuica198522016-02-04 15:10:08 +08001907 if (!amdgpu_card_posted(adev))
1908 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001909
1910 r = amdgpu_resume(adev);
Flora Cuica198522016-02-04 15:10:08 +08001911 if (r)
1912 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001913
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001914 amdgpu_fence_driver_resume(adev);
1915
Flora Cuica198522016-02-04 15:10:08 +08001916 if (resume) {
1917 r = amdgpu_ib_ring_tests(adev);
1918 if (r)
1919 DRM_ERROR("ib ring test failed (%d).\n", r);
1920 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001921
1922 r = amdgpu_late_init(adev);
1923 if (r)
1924 return r;
1925
Alex Deucher756e6882015-10-08 00:03:36 -04001926 /* pin cursors */
1927 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1928 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1929
1930 if (amdgpu_crtc->cursor_bo) {
1931 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1932 r = amdgpu_bo_reserve(aobj, false);
1933 if (r == 0) {
1934 r = amdgpu_bo_pin(aobj,
1935 AMDGPU_GEM_DOMAIN_VRAM,
1936 &amdgpu_crtc->cursor_addr);
1937 if (r != 0)
1938 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1939 amdgpu_bo_unreserve(aobj);
1940 }
1941 }
1942 }
1943
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001944 /* blat the mode back in */
1945 if (fbcon) {
1946 drm_helper_resume_force_mode(dev);
1947 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001948 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001949 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1950 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1951 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001952 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001953 }
1954
1955 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04001956
1957 /*
1958 * Most of the connector probing functions try to acquire runtime pm
1959 * refs to ensure that the GPU is powered on when connector polling is
1960 * performed. Since we're calling this from a runtime PM callback,
1961 * trying to acquire rpm refs will cause us to deadlock.
1962 *
1963 * Since we're guaranteed to be holding the rpm lock, it's safe to
1964 * temporarily disable the rpm helpers so this doesn't deadlock us.
1965 */
1966#ifdef CONFIG_PM
1967 dev->dev->power.disable_depth++;
1968#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05001969 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04001970#ifdef CONFIG_PM
1971 dev->dev->power.disable_depth--;
1972#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001973
1974 if (fbcon) {
1975 amdgpu_fbdev_set_suspend(adev, 0);
1976 console_unlock();
1977 }
1978
1979 return 0;
1980}
1981
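/*
 * amdgpu_check_soft_reset - poll the IP blocks for hangs
 *
 * Walks all valid IP blocks, lets each one run its check_soft_reset
 * callback and returns true if any block reports itself as hung.
 */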
Chunming Zhou63fbf422016-07-15 11:19:20 +08001982static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
1983{
1984 int i;
1985 bool asic_hang = false;
1986
1987 for (i = 0; i < adev->num_ip_blocks; i++) {
1988 if (!adev->ip_block_status[i].valid)
1989 continue;
1990 if (adev->ip_blocks[i].funcs->check_soft_reset)
1991 adev->ip_blocks[i].funcs->check_soft_reset(adev);
1992 if (adev->ip_block_status[i].hang) {
1993			DRM_INFO("IP block %d is hung!\n", i);
1994 asic_hang = true;
1995 }
1996 }
1997 return asic_hang;
1998}
1999
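/*
 * amdgpu_pre_soft_reset - prepare hung IP blocks for a soft reset
 *
 * Gives every hung IP block a chance to quiesce itself via its
 * pre_soft_reset callback before the actual soft reset is issued.
 * Returns 0 on success or the first error reported by a block.
 */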
Chunming Zhoud31a5012016-07-18 10:04:34 +08002000int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2001{
2002 int i, r = 0;
2003
2004 for (i = 0; i < adev->num_ip_blocks; i++) {
2005 if (!adev->ip_block_status[i].valid)
2006 continue;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002007 if (adev->ip_block_status[i].hang &&
2008 adev->ip_blocks[i].funcs->pre_soft_reset) {
Chunming Zhoud31a5012016-07-18 10:04:34 +08002009 r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
2010 if (r)
2011 return r;
2012 }
2013 }
2014
2015 return 0;
2016}
2017
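/*
 * amdgpu_need_full_reset - check whether a soft reset is sufficient
 *
 * A hang in the GMC, SMC, ACP or DCE block cannot be recovered by a
 * per-block soft reset, so a full ASIC reset is required instead.
 */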
Chunming Zhou35d782f2016-07-15 15:57:13 +08002018static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2019{
2020 if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
Chunming Zhou35d782f2016-07-15 15:57:13 +08002021 adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
Chunming Zhou35d782f2016-07-15 15:57:13 +08002022 adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
2023 adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
2024		DRM_INFO("Some blocks need a full reset!\n");
2025 return true;
2026 }
2027 return false;
2028}
2029
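/*
 * amdgpu_soft_reset - soft reset the hung IP blocks
 *
 * Invokes the soft_reset callback of every valid IP block that is
 * marked as hung. Returns 0 on success or the first error.
 */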
2030static int amdgpu_soft_reset(struct amdgpu_device *adev)
2031{
2032 int i, r = 0;
2033
2034 for (i = 0; i < adev->num_ip_blocks; i++) {
2035 if (!adev->ip_block_status[i].valid)
2036 continue;
2037 if (adev->ip_block_status[i].hang &&
2038 adev->ip_blocks[i].funcs->soft_reset) {
2039 r = adev->ip_blocks[i].funcs->soft_reset(adev);
2040 if (r)
2041 return r;
2042 }
2043 }
2044
2045 return 0;
2046}
2047
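/*
 * amdgpu_post_soft_reset - restore hung IP blocks after a soft reset
 *
 * Invokes the post_soft_reset callback of every hung IP block so it can
 * restore its state once the reset is done. Returns 0 on success or the
 * first error.
 */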
2048static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2049{
2050 int i, r = 0;
2051
2052 for (i = 0; i < adev->num_ip_blocks; i++) {
2053 if (!adev->ip_block_status[i].valid)
2054 continue;
2055 if (adev->ip_block_status[i].hang &&
2056 adev->ip_blocks[i].funcs->post_soft_reset)
2057 r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
2058 if (r)
2059 return r;
2060 }
2061
2062 return 0;
2063}
2064
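/*
 * amdgpu_need_backup - check whether shadow BO backups are needed
 *
 * APUs do not keep VRAM shadows; for dGPUs the shadows are only useful
 * when a lockup timeout (and therefore a possible GPU reset) is enabled.
 */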
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002065bool amdgpu_need_backup(struct amdgpu_device *adev)
2066{
2067 if (adev->flags & AMD_IS_APU)
2068 return false;
2069
2070	return amdgpu_lockup_timeout > 0;
2071}
2072
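/*
 * amdgpu_recover_vram_from_shadow - restore a VRAM BO from its shadow
 *
 * If the BO has a shadow copy and still resides in VRAM, schedule a copy
 * from the shadow back into VRAM on the given ring and hand back the
 * fence of that copy so the caller can wait for it.
 */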
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002073static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2074 struct amdgpu_ring *ring,
2075 struct amdgpu_bo *bo,
2076 struct fence **fence)
2077{
2078 uint32_t domain;
2079 int r;
2080
2081 if (!bo->shadow)
2082 return 0;
2083
2084 r = amdgpu_bo_reserve(bo, false);
2085 if (r)
2086 return r;
2087 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2088 /* if bo has been evicted, then no need to recover */
2089 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2090 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2091 NULL, fence, true);
2092 if (r) {
2093			DRM_ERROR("recovering page table from shadow failed!\n");
2094 goto err;
2095 }
2096 }
2097err:
2098 amdgpu_bo_unreserve(bo);
2099 return r;
2100}
2101
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002102/**
2103 * amdgpu_gpu_reset - reset the asic
2104 *
2105 * @adev: amdgpu device pointer
2106 *
2107 * Attempt to reset the GPU if it has hung (all asics).
2108 * Returns 0 for success or an error on failure.
2109 */
2110int amdgpu_gpu_reset(struct amdgpu_device *adev)
2111{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002112 int i, r;
2113 int resched;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002114 bool need_full_reset;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002115
Chunming Zhou63fbf422016-07-15 11:19:20 +08002116 if (!amdgpu_check_soft_reset(adev)) {
2117 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2118 return 0;
2119 }
2120
Marek Olšákd94aed52015-05-05 21:13:49 +02002121 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002122
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002123 /* block TTM */
2124 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2125
Chunming Zhou0875dc92016-06-12 15:41:58 +08002126 /* block scheduler */
2127 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2128 struct amdgpu_ring *ring = adev->rings[i];
2129
2130 if (!ring)
2131 continue;
2132 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002133 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002134 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002135 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2136 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002137
Chunming Zhou35d782f2016-07-15 15:57:13 +08002138 need_full_reset = amdgpu_need_full_reset(adev);
2139
2140 if (!need_full_reset) {
2141 amdgpu_pre_soft_reset(adev);
2142 r = amdgpu_soft_reset(adev);
2143 amdgpu_post_soft_reset(adev);
2144 if (r || amdgpu_check_soft_reset(adev)) {
2145			DRM_INFO("soft reset failed, will fall back to full reset!\n");
2146 need_full_reset = true;
2147 }
2148 }
2149
2150 if (need_full_reset) {
2151 /* save scratch */
2152 amdgpu_atombios_scratch_regs_save(adev);
2153 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002154
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002155retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08002156 /* Disable fb access */
2157 if (adev->mode_info.num_crtc) {
2158 struct amdgpu_mode_mc_save save;
2159 amdgpu_display_stop_mc_access(adev, &save);
2160 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2161 }
Chunming Zhouf1aa7e02016-06-28 10:38:50 +08002162
Chunming Zhou35d782f2016-07-15 15:57:13 +08002163 r = amdgpu_asic_reset(adev);
2164 /* post card */
2165 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002166
Chunming Zhou35d782f2016-07-15 15:57:13 +08002167 if (!r) {
2168 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2169 r = amdgpu_resume(adev);
2170 }
2171 /* restore scratch */
2172 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002173 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002174 if (!r) {
Chunming Zhoue72cfd52016-07-27 13:15:20 +08002175 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002176 r = amdgpu_ib_ring_tests(adev);
2177 if (r) {
2178 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002179 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002180 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002181 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002182 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002183		/*
2184		 * recover VM page tables, since we cannot rely on VRAM being
2185		 * consistent after a full GPU reset.
2186		 */
2187 if (need_full_reset && amdgpu_need_backup(adev)) {
2188 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2189 struct amdgpu_bo *bo, *tmp;
2190 struct fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002191
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002192 DRM_INFO("recover vram bo from shadow\n");
2193 mutex_lock(&adev->shadow_list_lock);
2194 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2195 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2196 if (fence) {
2197 r = fence_wait(fence, false);
2198 if (r) {
2199						WARN(r, "recovery from shadow isn't completed\n");
2200 break;
2201 }
2202 }
2203
2204 fence_put(fence);
2205 fence = next;
2206 }
2207 mutex_unlock(&adev->shadow_list_lock);
2208 if (fence) {
2209 r = fence_wait(fence, false);
2210 if (r)
2211					WARN(r, "recovery from shadow isn't completed\n");
2212 }
2213 fence_put(fence);
2214 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002215 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2216 struct amdgpu_ring *ring = adev->rings[i];
2217 if (!ring)
2218 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002219
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002220 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002221 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002222 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002223 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002224 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002225 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002226 if (adev->rings[i]) {
2227 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002228 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002229 }
2230 }
2231
2232 drm_helper_resume_force_mode(adev->ddev);
2233
2234 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2235 if (r) {
2236		/* bad news, how do we tell userspace? */
2237 dev_info(adev->dev, "GPU reset failed\n");
2238 }
2239
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002240 return r;
2241}
2242
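/*
 * amdgpu_get_pcie_info - fill in the PCIe gen and lane width masks
 *
 * Honors the amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap module
 * parameters when set, falls back to defaults for devices on a root bus
 * (which covers APUs), and otherwise derives the masks from the link
 * capabilities reported by the DRM PCIe helpers.
 */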
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002243void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2244{
2245 u32 mask;
2246 int ret;
2247
Alex Deuchercd474ba2016-02-04 10:21:23 -05002248 if (amdgpu_pcie_gen_cap)
2249 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2250
2251 if (amdgpu_pcie_lane_cap)
2252 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2253
2254 /* covers APUs as well */
2255 if (pci_is_root_bus(adev->pdev->bus)) {
2256 if (adev->pm.pcie_gen_mask == 0)
2257 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2258 if (adev->pm.pcie_mlw_mask == 0)
2259 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002260 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002261 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05002262
2263 if (adev->pm.pcie_gen_mask == 0) {
2264 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2265 if (!ret) {
2266 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2267 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2268 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2269
2270 if (mask & DRM_PCIE_SPEED_25)
2271 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2272 if (mask & DRM_PCIE_SPEED_50)
2273 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2274 if (mask & DRM_PCIE_SPEED_80)
2275 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2276 } else {
2277 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2278 }
2279 }
2280 if (adev->pm.pcie_mlw_mask == 0) {
2281 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2282 if (!ret) {
2283 switch (mask) {
2284 case 32:
2285 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2286 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2287 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2288 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2289 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2290 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2291 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2292 break;
2293 case 16:
2294 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2295 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2296 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2297 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2298 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2299 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2300 break;
2301 case 12:
2302 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2303 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2304 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2305 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2306 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2307 break;
2308 case 8:
2309 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2310 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2311 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2312 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2313 break;
2314 case 4:
2315 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2316 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2317 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2318 break;
2319 case 2:
2320 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2321 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2322 break;
2323 case 1:
2324 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2325 break;
2326 default:
2327 break;
2328 }
2329 } else {
2330 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002331 }
2332 }
2333}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002334
2335/*
2336 * Debugfs
2337 */
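/*
 * amdgpu_debugfs_add_files - register a set of debugfs files
 *
 * Records the file list in the per-device debugfs table (skipping lists
 * that are already registered) and, when debugfs is enabled, creates the
 * files on both the control and primary DRM minors.
 */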
2338int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04002339 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002340 unsigned nfiles)
2341{
2342 unsigned i;
2343
2344 for (i = 0; i < adev->debugfs_count; i++) {
2345 if (adev->debugfs[i].files == files) {
2346 /* Already registered */
2347 return 0;
2348 }
2349 }
2350
2351 i = adev->debugfs_count + 1;
2352 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2353 DRM_ERROR("Reached maximum number of debugfs components.\n");
2354 DRM_ERROR("Report so we increase "
2355 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2356 return -EINVAL;
2357 }
2358 adev->debugfs[adev->debugfs_count].files = files;
2359 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2360 adev->debugfs_count = i;
2361#if defined(CONFIG_DEBUG_FS)
2362 drm_debugfs_create_files(files, nfiles,
2363 adev->ddev->control->debugfs_root,
2364 adev->ddev->control);
2365 drm_debugfs_create_files(files, nfiles,
2366 adev->ddev->primary->debugfs_root,
2367 adev->ddev->primary);
2368#endif
2369 return 0;
2370}
2371
2372static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
2373{
2374#if defined(CONFIG_DEBUG_FS)
2375 unsigned i;
2376
2377 for (i = 0; i < adev->debugfs_count; i++) {
2378 drm_debugfs_remove_files(adev->debugfs[i].files,
2379 adev->debugfs[i].num_files,
2380 adev->ddev->control);
2381 drm_debugfs_remove_files(adev->debugfs[i].files,
2382 adev->debugfs[i].num_files,
2383 adev->ddev->primary);
2384 }
2385#endif
2386}
2387
2388#if defined(CONFIG_DEBUG_FS)
2389
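/*
 * amdgpu_debugfs_regs_read - read MMIO registers through debugfs
 *
 * The file offset encodes more than the register offset:
 *   bits  0..17 - register offset in bytes (masked with 0x3FFFF)
 *   bit  23     - take the PM mutex around the accesses (PG lock)
 *   bit  62     - apply GRBM bank selection, with the SE, SH and
 *                 instance banks taken from bits 24..33, 34..43 and
 *                 44..53 respectively
 */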
2390static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2391 size_t size, loff_t *pos)
2392{
2393 struct amdgpu_device *adev = f->f_inode->i_private;
2394 ssize_t result = 0;
2395 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04002396 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04002397 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002398
2399 if (size & 0x3 || *pos & 0x3)
2400 return -EINVAL;
2401
Tom St Denisbd122672016-07-28 09:39:22 -04002402 /* are we reading registers for which a PG lock is necessary? */
2403 pm_pg_lock = (*pos >> 23) & 1;
2404
Tom St Denis566281592016-06-27 11:55:07 -04002405 if (*pos & (1ULL << 62)) {
2406 se_bank = (*pos >> 24) & 0x3FF;
2407 sh_bank = (*pos >> 34) & 0x3FF;
2408 instance_bank = (*pos >> 44) & 0x3FF;
2409 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04002410 } else {
2411 use_bank = 0;
2412 }
2413
Tom St Denisbd122672016-07-28 09:39:22 -04002414 *pos &= 0x3FFFF;
2415
Tom St Denis566281592016-06-27 11:55:07 -04002416 if (use_bank) {
2417 if (sh_bank >= adev->gfx.config.max_sh_per_se ||
2418 se_bank >= adev->gfx.config.max_shader_engines)
2419 return -EINVAL;
2420 mutex_lock(&adev->grbm_idx_mutex);
2421 amdgpu_gfx_select_se_sh(adev, se_bank,
2422 sh_bank, instance_bank);
2423 }
2424
Tom St Denisbd122672016-07-28 09:39:22 -04002425 if (pm_pg_lock)
2426 mutex_lock(&adev->pm.mutex);
2427
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002428 while (size) {
2429 uint32_t value;
2430
2431 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04002432 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002433
2434 value = RREG32(*pos >> 2);
2435 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04002436 if (r) {
2437 result = r;
2438 goto end;
2439 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002440
2441 result += 4;
2442 buf += 4;
2443 *pos += 4;
2444 size -= 4;
2445 }
2446
Tom St Denis566281592016-06-27 11:55:07 -04002447end:
2448 if (use_bank) {
2449 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2450 mutex_unlock(&adev->grbm_idx_mutex);
2451 }
2452
Tom St Denisbd122672016-07-28 09:39:22 -04002453 if (pm_pg_lock)
2454 mutex_unlock(&adev->pm.mutex);
2455
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002456 return result;
2457}
2458
2459static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2460 size_t size, loff_t *pos)
2461{
2462 struct amdgpu_device *adev = f->f_inode->i_private;
2463 ssize_t result = 0;
2464 int r;
2465
2466 if (size & 0x3 || *pos & 0x3)
2467 return -EINVAL;
2468
2469 while (size) {
2470 uint32_t value;
2471
2472 if (*pos > adev->rmmio_size)
2473 return result;
2474
2475 r = get_user(value, (uint32_t *)buf);
2476 if (r)
2477 return r;
2478
2479 WREG32(*pos >> 2, value);
2480
2481 result += 4;
2482 buf += 4;
2483 *pos += 4;
2484 size -= 4;
2485 }
2486
2487 return result;
2488}
2489
Tom St Denisadcec282016-04-15 13:08:44 -04002490static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2491 size_t size, loff_t *pos)
2492{
2493 struct amdgpu_device *adev = f->f_inode->i_private;
2494 ssize_t result = 0;
2495 int r;
2496
2497 if (size & 0x3 || *pos & 0x3)
2498 return -EINVAL;
2499
2500 while (size) {
2501 uint32_t value;
2502
2503 value = RREG32_PCIE(*pos >> 2);
2504 r = put_user(value, (uint32_t *)buf);
2505 if (r)
2506 return r;
2507
2508 result += 4;
2509 buf += 4;
2510 *pos += 4;
2511 size -= 4;
2512 }
2513
2514 return result;
2515}
2516
2517static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2518 size_t size, loff_t *pos)
2519{
2520 struct amdgpu_device *adev = f->f_inode->i_private;
2521 ssize_t result = 0;
2522 int r;
2523
2524 if (size & 0x3 || *pos & 0x3)
2525 return -EINVAL;
2526
2527 while (size) {
2528 uint32_t value;
2529
2530 r = get_user(value, (uint32_t *)buf);
2531 if (r)
2532 return r;
2533
2534 WREG32_PCIE(*pos >> 2, value);
2535
2536 result += 4;
2537 buf += 4;
2538 *pos += 4;
2539 size -= 4;
2540 }
2541
2542 return result;
2543}
2544
2545static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2546 size_t size, loff_t *pos)
2547{
2548 struct amdgpu_device *adev = f->f_inode->i_private;
2549 ssize_t result = 0;
2550 int r;
2551
2552 if (size & 0x3 || *pos & 0x3)
2553 return -EINVAL;
2554
2555 while (size) {
2556 uint32_t value;
2557
2558 value = RREG32_DIDT(*pos >> 2);
2559 r = put_user(value, (uint32_t *)buf);
2560 if (r)
2561 return r;
2562
2563 result += 4;
2564 buf += 4;
2565 *pos += 4;
2566 size -= 4;
2567 }
2568
2569 return result;
2570}
2571
2572static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2573 size_t size, loff_t *pos)
2574{
2575 struct amdgpu_device *adev = f->f_inode->i_private;
2576 ssize_t result = 0;
2577 int r;
2578
2579 if (size & 0x3 || *pos & 0x3)
2580 return -EINVAL;
2581
2582 while (size) {
2583 uint32_t value;
2584
2585 r = get_user(value, (uint32_t *)buf);
2586 if (r)
2587 return r;
2588
2589 WREG32_DIDT(*pos >> 2, value);
2590
2591 result += 4;
2592 buf += 4;
2593 *pos += 4;
2594 size -= 4;
2595 }
2596
2597 return result;
2598}
2599
2600static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2601 size_t size, loff_t *pos)
2602{
2603 struct amdgpu_device *adev = f->f_inode->i_private;
2604 ssize_t result = 0;
2605 int r;
2606
2607 if (size & 0x3 || *pos & 0x3)
2608 return -EINVAL;
2609
2610 while (size) {
2611 uint32_t value;
2612
Tom St Denis6fc0dea2016-08-29 08:39:29 -04002613 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04002614 r = put_user(value, (uint32_t *)buf);
2615 if (r)
2616 return r;
2617
2618 result += 4;
2619 buf += 4;
2620 *pos += 4;
2621 size -= 4;
2622 }
2623
2624 return result;
2625}
2626
2627static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
2628 size_t size, loff_t *pos)
2629{
2630 struct amdgpu_device *adev = f->f_inode->i_private;
2631 ssize_t result = 0;
2632 int r;
2633
2634 if (size & 0x3 || *pos & 0x3)
2635 return -EINVAL;
2636
2637 while (size) {
2638 uint32_t value;
2639
2640 r = get_user(value, (uint32_t *)buf);
2641 if (r)
2642 return r;
2643
Tom St Denis6fc0dea2016-08-29 08:39:29 -04002644 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04002645
2646 result += 4;
2647 buf += 4;
2648 *pos += 4;
2649 size -= 4;
2650 }
2651
2652 return result;
2653}
2654
Tom St Denis1e051412016-06-27 09:57:18 -04002655static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2656 size_t size, loff_t *pos)
2657{
2658 struct amdgpu_device *adev = f->f_inode->i_private;
2659 ssize_t result = 0;
2660 int r;
2661 uint32_t *config, no_regs = 0;
2662
2663 if (size & 0x3 || *pos & 0x3)
2664 return -EINVAL;
2665
2666 config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
2667 if (!config)
2668 return -ENOMEM;
2669
2670 /* version, increment each time something is added */
Tom St Denise9f11dc2016-08-17 12:00:51 -04002671 config[no_regs++] = 2;
Tom St Denis1e051412016-06-27 09:57:18 -04002672 config[no_regs++] = adev->gfx.config.max_shader_engines;
2673 config[no_regs++] = adev->gfx.config.max_tile_pipes;
2674 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
2675 config[no_regs++] = adev->gfx.config.max_sh_per_se;
2676 config[no_regs++] = adev->gfx.config.max_backends_per_se;
2677 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
2678 config[no_regs++] = adev->gfx.config.max_gprs;
2679 config[no_regs++] = adev->gfx.config.max_gs_threads;
2680 config[no_regs++] = adev->gfx.config.max_hw_contexts;
2681 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
2682 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
2683 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
2684 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
2685 config[no_regs++] = adev->gfx.config.num_tile_pipes;
2686 config[no_regs++] = adev->gfx.config.backend_enable_mask;
2687 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
2688 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
2689 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
2690 config[no_regs++] = adev->gfx.config.num_gpus;
2691 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
2692 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
2693 config[no_regs++] = adev->gfx.config.gb_addr_config;
2694 config[no_regs++] = adev->gfx.config.num_rbs;
2695
Tom St Denis89a8f302016-08-12 15:14:31 -04002696 /* rev==1 */
2697 config[no_regs++] = adev->rev_id;
2698 config[no_regs++] = adev->pg_flags;
2699 config[no_regs++] = adev->cg_flags;
2700
Tom St Denise9f11dc2016-08-17 12:00:51 -04002701 /* rev==2 */
2702 config[no_regs++] = adev->family;
2703 config[no_regs++] = adev->external_rev_id;
2704
Tom St Denis1e051412016-06-27 09:57:18 -04002705 while (size && (*pos < no_regs * 4)) {
2706 uint32_t value;
2707
2708 value = config[*pos >> 2];
2709 r = put_user(value, (uint32_t *)buf);
2710 if (r) {
2711 kfree(config);
2712 return r;
2713 }
2714
2715 result += 4;
2716 buf += 4;
2717 *pos += 4;
2718 size -= 4;
2719 }
2720
2721 kfree(config);
2722 return result;
2723}
2724
2725
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002726static const struct file_operations amdgpu_debugfs_regs_fops = {
2727 .owner = THIS_MODULE,
2728 .read = amdgpu_debugfs_regs_read,
2729 .write = amdgpu_debugfs_regs_write,
2730 .llseek = default_llseek
2731};
Tom St Denisadcec282016-04-15 13:08:44 -04002732static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
2733 .owner = THIS_MODULE,
2734 .read = amdgpu_debugfs_regs_didt_read,
2735 .write = amdgpu_debugfs_regs_didt_write,
2736 .llseek = default_llseek
2737};
2738static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
2739 .owner = THIS_MODULE,
2740 .read = amdgpu_debugfs_regs_pcie_read,
2741 .write = amdgpu_debugfs_regs_pcie_write,
2742 .llseek = default_llseek
2743};
2744static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
2745 .owner = THIS_MODULE,
2746 .read = amdgpu_debugfs_regs_smc_read,
2747 .write = amdgpu_debugfs_regs_smc_write,
2748 .llseek = default_llseek
2749};
2750
Tom St Denis1e051412016-06-27 09:57:18 -04002751static const struct file_operations amdgpu_debugfs_gca_config_fops = {
2752 .owner = THIS_MODULE,
2753 .read = amdgpu_debugfs_gca_config_read,
2754 .llseek = default_llseek
2755};
2756
Tom St Denisadcec282016-04-15 13:08:44 -04002757static const struct file_operations *debugfs_regs[] = {
2758 &amdgpu_debugfs_regs_fops,
2759 &amdgpu_debugfs_regs_didt_fops,
2760 &amdgpu_debugfs_regs_pcie_fops,
2761 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04002762 &amdgpu_debugfs_gca_config_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04002763};
2764
2765static const char *debugfs_regs_names[] = {
2766 "amdgpu_regs",
2767 "amdgpu_regs_didt",
2768 "amdgpu_regs_pcie",
2769 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04002770 "amdgpu_gca_config",
Tom St Denisadcec282016-04-15 13:08:44 -04002771};
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002772
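/*
 * amdgpu_debugfs_regs_init - create the register access debugfs files
 *
 * Creates one debugfs file per entry in debugfs_regs_names on the primary
 * DRM minor and removes any files already created if a creation fails.
 */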
2773static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2774{
2775 struct drm_minor *minor = adev->ddev->primary;
2776 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04002777 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002778
Tom St Denisadcec282016-04-15 13:08:44 -04002779 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2780 ent = debugfs_create_file(debugfs_regs_names[i],
2781 S_IFREG | S_IRUGO, root,
2782 adev, debugfs_regs[i]);
2783 if (IS_ERR(ent)) {
2784 for (j = 0; j < i; j++) {
2785				debugfs_remove(adev->debugfs_regs[j]);
2786				adev->debugfs_regs[j] = NULL;
2787 }
2788 return PTR_ERR(ent);
2789 }
2790
2791 if (!i)
2792 i_size_write(ent->d_inode, adev->rmmio_size);
2793 adev->debugfs_regs[i] = ent;
2794 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002795
2796 return 0;
2797}
2798
2799static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
2800{
Tom St Denisadcec282016-04-15 13:08:44 -04002801 unsigned i;
2802
2803 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2804 if (adev->debugfs_regs[i]) {
2805 debugfs_remove(adev->debugfs_regs[i]);
2806 adev->debugfs_regs[i] = NULL;
2807 }
2808 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002809}
2810
2811int amdgpu_debugfs_init(struct drm_minor *minor)
2812{
2813 return 0;
2814}
2815
2816void amdgpu_debugfs_cleanup(struct drm_minor *minor)
2817{
2818}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06002819#else
2820static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2821{
2822 return 0;
2823}
2824static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002825#endif