/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"LAST",
};

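/**
 * amdgpu_device_is_px - check whether the device is a PX (PowerXpress) part
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device has the AMD_IS_PX flag set (hybrid graphics
 * systems), false otherwise.
 */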
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
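/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @always_indirect: force the access to go through mmMM_INDEX/mmMM_DATA
 *
 * Returns the 32 bit value from the offset specified.  Under SR-IOV at
 * runtime the access is routed through the KIQ helper instead.
 */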
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
{
	uint32_t ret;

	if (amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

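/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @always_indirect: force the access to go through mmMM_INDEX/mmMM_DATA
 *
 * Writes the value specified to the offset specified.  Under SR-IOV at
 * runtime the access is routed through the KIQ helper instead.
 */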
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

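/**
 * amdgpu_io_rreg - read an IO (PIO) register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified, going through
 * mmMM_INDEX/mmMM_DATA when the offset lies outside the IO BAR.
 */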
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

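/**
 * amdgpu_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates, pins and maps a single GPU page in VRAM for the driver to use
 * as scratch space.  Returns 0 on success, negative error code on failure.
 */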
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

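/**
 * amdgpu_vram_scratch_fini - tear down the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Unmaps, unpins and frees the VRAM scratch buffer object, if it exists.
 */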
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

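/*
 * A typical (illustrative) usage pattern for the writeback helpers below:
 * a ring allocates a slot with amdgpu_wb_get(), points the hardware at
 * adev->wb.gpu_addr + (offset * 4), reads the updated value back from
 * adev->wb.wb[offset] on the CPU, and releases the slot again with
 * amdgpu_wb_free() at teardown.
 */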
/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_card_posted - check if the hw has already been initialized
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool amdgpu_card_posted(struct amdgpu_device *adev)
{
	uint32_t reg;

	/* then check MEM_SIZE, in case the crtcs are off */
	reg = RREG32(mmCONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

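/**
 * amdgpu_vpost_needed - check whether the driver has to post the card
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the driver needs to run vPost (ASIC init) itself, i.e.
 * when the card has not already been posted or when a pass-through setup
 * with known-bad SMC firmware requires it; false for SR-IOV VFs.
 */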
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old SMC firmware still needs the driver
		 * to do a vPost, otherwise the GPU hangs.  SMC firmware versions
		 * above 22.15 don't have this flaw, so force vPost for SMC
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return !amdgpu_card_posted(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

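/**
 * amdgpu_set_clockgating_state - set clockgating for an IP block type
 *
 * @adev: amdgpu_device pointer
 * @block_type: the IP block type to operate on
 * @state: clockgating state to set (gate or ungate)
 *
 * Walks the IP blocks and calls set_clockgating_state() on the first valid
 * block of the requested type.  Returns 0 on success, error code otherwise.
 */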
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			 (ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

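/**
 * amdgpu_device_enable_virtual_display - parse the virtual display option
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the amdgpu_virtual_display module parameter (a semicolon separated
 * list of "pci_address,crtc_count" entries, or "all") and, when this device
 * matches, enables virtual display and sets the requested CRTC count (1-6).
 */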
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

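/**
 * amdgpu_early_init - run early_init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Selects the IP block list for the detected ASIC family, honours the
 * amdgpu_ip_block_mask module parameter, and calls each block's early_init
 * callback.  Returns 0 on success, negative error code otherwise.
 */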
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

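/**
 * amdgpu_init - run sw_init and hw_init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls sw_init for every valid IP block, brings up the GMC block early
 * (so GPU memory, the VRAM scratch page and writeback can be allocated),
 * then runs hw_init for the remaining blocks.  Returns 0 on success.
 */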
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

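/**
 * amdgpu_late_init - run late_init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls late_init for every valid IP block and enables clockgating on all
 * blocks except UVD and VCE, whose gating is handled separately.
 * Returns 0 on success, negative error code otherwise.
 */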
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

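/**
 * amdgpu_fini - tear down all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Disables the SMC block first, then runs hw_fini, sw_fini and late_fini
 * for every IP block in reverse order, and releases the SR-IOV CSA and
 * full-GPU access when running as a virtual function.
 */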
1478static int amdgpu_fini(struct amdgpu_device *adev)
1479{
1480 int i, r;
1481
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001482 /* need to disable SMC first */
1483 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001484 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001485 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001486 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001487 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001488 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1489 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001490 if (r) {
1491 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001492 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001493 return r;
1494 }
Alex Deuchera1255102016-10-13 17:41:13 -04001495 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001496 /* XXX handle errors */
1497 if (r) {
1498 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001499 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001500 }
Alex Deuchera1255102016-10-13 17:41:13 -04001501 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001502 break;
1503 }
1504 }
1505
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001506 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001507 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001508 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001509 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001510 amdgpu_wb_fini(adev);
1511 amdgpu_vram_scratch_fini(adev);
1512 }
Rex Zhu8201a672016-11-24 21:44:44 +08001513
1514 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1515 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1516 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1517 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1518 AMD_CG_STATE_UNGATE);
1519 if (r) {
1520 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1521 adev->ip_blocks[i].version->funcs->name, r);
1522 return r;
1523 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001524 }
Rex Zhu8201a672016-11-24 21:44:44 +08001525
Alex Deuchera1255102016-10-13 17:41:13 -04001526 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001527 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001528 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001529 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1530 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001531 }
Rex Zhu8201a672016-11-24 21:44:44 +08001532
Alex Deuchera1255102016-10-13 17:41:13 -04001533 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001534 }
1535
1536 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001537 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001538 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001539 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001540 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001541 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001542 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1543 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001544 }
Alex Deuchera1255102016-10-13 17:41:13 -04001545 adev->ip_blocks[i].status.sw = false;
1546 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001547 }
1548
Monk Liua6dcfd92016-05-19 14:36:34 +08001549 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001550 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001551 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001552 if (adev->ip_blocks[i].version->funcs->late_fini)
1553 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1554 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001555 }
1556
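	/* running as an SRIOV VF: free the CSA object and hand full control of the GPU back to the host */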
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001557 if (amdgpu_sriov_vf(adev)) {
Monk Liu24936642017-01-09 15:54:32 +08001558 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001559 amdgpu_virt_release_full_gpu(adev, false);
1560 }
Monk Liu24936642017-01-09 15:54:32 +08001561
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001562 return 0;
1563}
1564
Alex Deucherfaefba92016-12-06 10:38:29 -05001565int amdgpu_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001566{
1567 int i, r;
1568
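	/* a virtual function has to request full (exclusive) GPU access from the host before it may touch the hw */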
Xiangliang Yue941ea92017-01-18 12:47:55 +08001569 if (amdgpu_sriov_vf(adev))
1570 amdgpu_virt_request_full_gpu(adev, false);
1571
Flora Cuic5a93a22016-02-26 10:45:25 +08001572 /* ungate SMC block first */
1573 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1574 AMD_CG_STATE_UNGATE);
1575 if (r) {
1576 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1577 }
1578
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001579 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001580 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001581 continue;
1582 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001583 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
Alex Deuchera1255102016-10-13 17:41:13 -04001584 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1585 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001586 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001587 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1588 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001589 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001590 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001591 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001592 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001593 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001594 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001595 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1596 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001597 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001598 }
1599
Xiangliang Yue941ea92017-01-18 12:47:55 +08001600 if (amdgpu_sriov_vf(adev))
1601 amdgpu_virt_release_full_gpu(adev, false);
1602
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001603 return 0;
1604}
1605
1606static int amdgpu_resume(struct amdgpu_device *adev)
1607{
1608 int i, r;
1609
1610 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001611 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001612 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001613 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001614 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001615 DRM_ERROR("resume of IP block <%s> failed %d\n",
1616 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001617 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001618 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001619 }
1620
1621 return 0;
1622}
1623
Monk Liu4e99a442016-03-31 13:26:59 +08001624static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04001625{
Monk Liu4e99a442016-03-31 13:26:59 +08001626 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
Xiangliang Yu5a5099c2017-01-09 18:06:57 -05001627 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
Andres Rodriguez048765a2016-06-11 02:51:32 -04001628}
1629
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001630/**
1631 * amdgpu_device_init - initialize the driver
1632 *
1633 * @adev: amdgpu_device pointer
1634 * @ddev: drm dev pointer
1635 * @pdev: pci dev pointer
1636 * @flags: driver flags
1637 *
1638 * Initializes the driver info and hw (all asics).
1639 * Returns 0 for success or an error on failure.
1640 * Called at driver startup.
1641 */
1642int amdgpu_device_init(struct amdgpu_device *adev,
1643 struct drm_device *ddev,
1644 struct pci_dev *pdev,
1645 uint32_t flags)
1646{
1647 int r, i;
1648 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02001649 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001650
1651 adev->shutdown = false;
1652 adev->dev = &pdev->dev;
1653 adev->ddev = ddev;
1654 adev->pdev = pdev;
1655 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08001656 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001657 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1658 adev->mc.gtt_size = 512 * 1024 * 1024;
1659 adev->accel_working = false;
1660 adev->num_rings = 0;
1661 adev->mman.buffer_funcs = NULL;
1662 adev->mman.buffer_funcs_ring = NULL;
1663 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01001664 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001665 adev->gart.gart_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01001666 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001667
1668 adev->smc_rreg = &amdgpu_invalid_rreg;
1669 adev->smc_wreg = &amdgpu_invalid_wreg;
1670 adev->pcie_rreg = &amdgpu_invalid_rreg;
1671 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08001672 adev->pciep_rreg = &amdgpu_invalid_rreg;
1673 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001674 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1675 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1676 adev->didt_rreg = &amdgpu_invalid_rreg;
1677 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08001678 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1679 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001680 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1681 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1682
Rex Zhuccdbb202016-06-08 12:47:41 +08001683
Alex Deucher3e39ab92015-06-05 15:04:33 -04001684 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1685 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1686 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001687
1688 	/* mutex initializations are all done here so we
1689 	 * can call functions later without locking issues */
Christian König8d0a7ce2015-11-03 20:58:50 +01001690 mutex_init(&adev->vm_manager.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001691 atomic_set(&adev->irq.ih.lock, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001692 mutex_init(&adev->pm.mutex);
1693 mutex_init(&adev->gfx.gpu_clock_mutex);
1694 mutex_init(&adev->srbm_mutex);
1695 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001696 mutex_init(&adev->mn_lock);
1697 hash_init(adev->mn_hash);
1698
1699 amdgpu_check_arguments(adev);
1700
1701 /* Registers mapping */
1702 /* TODO: block userspace mapping of io register */
1703 spin_lock_init(&adev->mmio_idx_lock);
1704 spin_lock_init(&adev->smc_idx_lock);
1705 spin_lock_init(&adev->pcie_idx_lock);
1706 spin_lock_init(&adev->uvd_ctx_idx_lock);
1707 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08001708 spin_lock_init(&adev->gc_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001709 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02001710 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001711
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08001712 INIT_LIST_HEAD(&adev->shadow_list);
1713 mutex_init(&adev->shadow_list_lock);
1714
Chunming Zhou5c1354b2016-08-30 16:13:10 +08001715 INIT_LIST_HEAD(&adev->gtt_list);
1716 spin_lock_init(&adev->gtt_list_lock);
1717
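	/* register BAR: SI parts expose the MMIO registers in BAR 2, CIK and newer in BAR 5 */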
Ken Wangda69c1612016-01-21 19:08:55 +08001718 if (adev->asic_type >= CHIP_BONAIRE) {
1719 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1720 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1721 } else {
1722 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1723 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1724 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08001725
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001726 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1727 if (adev->rmmio == NULL) {
1728 return -ENOMEM;
1729 }
1730 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1731 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1732
Ken Wangda69c1612016-01-21 19:08:55 +08001733 if (adev->asic_type >= CHIP_BONAIRE)
1734 /* doorbell bar mapping */
1735 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001736
1737 /* io port mapping */
1738 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1739 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1740 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1741 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1742 break;
1743 }
1744 }
1745 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05001746 DRM_INFO("PCI I/O BAR is not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001747
1748 /* early init functions */
1749 r = amdgpu_early_init(adev);
1750 if (r)
1751 return r;
1752
1753 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1754 /* this will fail for cards that aren't VGA class devices, just
1755 * ignore it */
1756 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1757
1758 if (amdgpu_runtime_pm == 1)
1759 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04001760 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001761 runtime = true;
1762 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1763 if (runtime)
1764 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1765
1766 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04001767 if (!amdgpu_get_bios(adev)) {
1768 r = -EINVAL;
1769 goto failed;
1770 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01001771
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001772 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001773 if (r) {
1774 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001775 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001776 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001777
Monk Liu4e99a442016-03-31 13:26:59 +08001778 /* detect if we are with an SRIOV vbios */
1779 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04001780
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001781 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08001782 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001783 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08001784 dev_err(adev->dev, "no vBIOS found\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001785 r = -EINVAL;
1786 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001787 }
Monk Liubec86372016-09-14 19:38:08 +08001788 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08001789 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1790 if (r) {
1791 dev_err(adev->dev, "gpu post error!\n");
1792 goto failed;
1793 }
1794 } else {
1795 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001796 }
1797
1798 /* Initialize clocks */
1799 r = amdgpu_atombios_get_clock_info(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001800 if (r) {
1801 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001802 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001803 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001804 /* init i2c buses */
1805 amdgpu_atombios_i2c_init(adev);
1806
1807 /* Fence driver */
1808 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001809 if (r) {
1810 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001811 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001812 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001813
1814 /* init the mode config */
1815 drm_mode_config_init(adev->ddev);
1816
1817 r = amdgpu_init(adev);
1818 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05001819 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001820 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001821 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001822 }
1823
1824 adev->accel_working = true;
1825
Marek Olšák95844d22016-08-17 23:49:27 +02001826 /* Initialize the buffer migration limit. */
1827 if (amdgpu_moverate >= 0)
1828 max_MBps = amdgpu_moverate;
1829 else
1830 max_MBps = 8; /* Allow 8 MB/s. */
1831 /* Get a log2 for easy divisions. */
1832 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1833
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001834 amdgpu_fbdev_init(adev);
1835
1836 r = amdgpu_ib_pool_init(adev);
1837 if (r) {
1838 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deucher83ba1262016-06-03 18:21:41 -04001839 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001840 }
1841
1842 r = amdgpu_ib_ring_tests(adev);
1843 if (r)
1844 DRM_ERROR("ib ring test failed (%d).\n", r);
1845
1846 r = amdgpu_gem_debugfs_init(adev);
1847 if (r) {
1848 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1849 }
1850
1851 r = amdgpu_debugfs_regs_init(adev);
1852 if (r) {
1853 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1854 }
1855
Huang Rui50ab2532016-06-12 15:51:09 +08001856 r = amdgpu_debugfs_firmware_init(adev);
1857 if (r) {
1858 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1859 return r;
1860 }
1861
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001862 if ((amdgpu_testing & 1)) {
1863 if (adev->accel_working)
1864 amdgpu_test_moves(adev);
1865 else
1866 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1867 }
1868 if ((amdgpu_testing & 2)) {
1869 if (adev->accel_working)
1870 amdgpu_test_syncing(adev);
1871 else
1872 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1873 }
1874 if (amdgpu_benchmarking) {
1875 if (adev->accel_working)
1876 amdgpu_benchmark(adev, amdgpu_benchmarking);
1877 else
1878 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1879 }
1880
1881 /* enable clockgating, etc. after ib tests, etc. since some blocks require
1882 * explicit gating rather than handling it automatically.
1883 */
1884 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001885 if (r) {
1886 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001887 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001888 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001889
1890 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04001891
1892failed:
1893 if (runtime)
1894 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1895 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001896}
1897
1898static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
1899
1900/**
1901 * amdgpu_device_fini - tear down the driver
1902 *
1903 * @adev: amdgpu_device pointer
1904 *
1905 * Tear down the driver info (all asics).
1906 * Called at driver shutdown.
1907 */
1908void amdgpu_device_fini(struct amdgpu_device *adev)
1909{
1910 int r;
1911
1912 DRM_INFO("amdgpu: finishing device.\n");
1913 adev->shutdown = true;
Grazvydas Ignotasa951ed82016-09-25 23:34:48 +03001914 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001915 /* evict vram memory */
1916 amdgpu_bo_evict_vram(adev);
1917 amdgpu_ib_pool_fini(adev);
1918 amdgpu_fence_driver_fini(adev);
1919 amdgpu_fbdev_fini(adev);
1920 r = amdgpu_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001921 adev->accel_working = false;
1922 /* free i2c buses */
1923 amdgpu_i2c_fini(adev);
1924 amdgpu_atombios_fini(adev);
1925 kfree(adev->bios);
1926 adev->bios = NULL;
1927 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001928 if (adev->flags & AMD_IS_PX)
1929 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001930 vga_client_register(adev->pdev, NULL, NULL, NULL);
1931 if (adev->rio_mem)
1932 pci_iounmap(adev->pdev, adev->rio_mem);
1933 adev->rio_mem = NULL;
1934 iounmap(adev->rmmio);
1935 adev->rmmio = NULL;
Ken Wangda69c1612016-01-21 19:08:55 +08001936 if (adev->asic_type >= CHIP_BONAIRE)
1937 amdgpu_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001938 amdgpu_debugfs_regs_cleanup(adev);
1939 amdgpu_debugfs_remove_files(adev);
1940}
1941
1942
1943/*
1944 * Suspend & resume.
1945 */
1946/**
Alex Deucher810ddc32016-08-23 13:25:49 -04001947 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001948 *
1949 * @dev: drm dev pointer
1950 * @suspend: if true, also put the pci device into the D3hot power state
 * @fbcon: if true, suspend the fbdev console as well
1951 *
1952 * Puts the hw in the suspend state (all asics).
1953 * Returns 0 for success or an error on failure.
1954 * Called at driver suspend.
1955 */
Alex Deucher810ddc32016-08-23 13:25:49 -04001956int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001957{
1958 struct amdgpu_device *adev;
1959 struct drm_crtc *crtc;
1960 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001961 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001962
1963 if (dev == NULL || dev->dev_private == NULL) {
1964 return -ENODEV;
1965 }
1966
1967 adev = dev->dev_private;
1968
1969 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1970 return 0;
1971
1972 drm_kms_helper_poll_disable(dev);
1973
1974 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001975 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001976 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1977 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1978 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001979 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001980
Alex Deucher756e6882015-10-08 00:03:36 -04001981 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001982 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04001983 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001984 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1985 struct amdgpu_bo *robj;
1986
Alex Deucher756e6882015-10-08 00:03:36 -04001987 if (amdgpu_crtc->cursor_bo) {
1988 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1989 r = amdgpu_bo_reserve(aobj, false);
1990 if (r == 0) {
1991 amdgpu_bo_unpin(aobj);
1992 amdgpu_bo_unreserve(aobj);
1993 }
1994 }
1995
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001996 if (rfb == NULL || rfb->obj == NULL) {
1997 continue;
1998 }
1999 robj = gem_to_amdgpu_bo(rfb->obj);
2000 /* don't unpin kernel fb objects */
2001 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2002 r = amdgpu_bo_reserve(robj, false);
2003 if (r == 0) {
2004 amdgpu_bo_unpin(robj);
2005 amdgpu_bo_unreserve(robj);
2006 }
2007 }
2008 }
2009 /* evict vram memory */
2010 amdgpu_bo_evict_vram(adev);
2011
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002012 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002013
2014 r = amdgpu_suspend(adev);
2015
Alex Deuchera0a71e42016-10-10 12:41:36 -04002016 /* evict remaining vram memory
2017 * This second call to evict vram is to evict the gart page table
2018 * using the CPU.
2019 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002020 amdgpu_bo_evict_vram(adev);
2021
Alex Deuchere695e772016-10-19 14:40:58 -04002022 amdgpu_atombios_scratch_regs_save(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002023 pci_save_state(dev->pdev);
2024 if (suspend) {
2025 /* Shut down the device */
2026 pci_disable_device(dev->pdev);
2027 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002028 } else {
2029 r = amdgpu_asic_reset(adev);
2030 if (r)
2031 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002032 }
2033
2034 if (fbcon) {
2035 console_lock();
2036 amdgpu_fbdev_set_suspend(adev, 1);
2037 console_unlock();
2038 }
2039 return 0;
2040}
2041
2042/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002043 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002044 *
2045 * @dev: drm dev pointer
 * @resume: if true, re-enable the pci device and bring it back to full power
 * @fbcon: if true, resume the fbdev console as well
2046 *
2047 * Bring the hw back to operating state (all asics).
2048 * Returns 0 for success or an error on failure.
2049 * Called at driver resume.
2050 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002051int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002052{
2053 struct drm_connector *connector;
2054 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002055 struct drm_crtc *crtc;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002056 int r;
2057
2058 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2059 return 0;
2060
jimqu74b0b152016-09-07 17:09:12 +08002061 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002062 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002063
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002064 if (resume) {
2065 pci_set_power_state(dev->pdev, PCI_D0);
2066 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002067 r = pci_enable_device(dev->pdev);
2068 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002069 if (fbcon)
2070 console_unlock();
jimqu74b0b152016-09-07 17:09:12 +08002071 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002072 }
2073 }
Alex Deuchere695e772016-10-19 14:40:58 -04002074 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002075
2076 /* post card */
jimqu74b0b152016-09-07 17:09:12 +08002077 if (!amdgpu_card_posted(adev) || !resume) {
2078 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2079 if (r)
2080 DRM_ERROR("amdgpu asic init failed\n");
2081 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002082
2083 r = amdgpu_resume(adev);
Flora Cuica198522016-02-04 15:10:08 +08002084 if (r)
2085 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002086
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002087 amdgpu_fence_driver_resume(adev);
2088
Flora Cuica198522016-02-04 15:10:08 +08002089 if (resume) {
2090 r = amdgpu_ib_ring_tests(adev);
2091 if (r)
2092 DRM_ERROR("ib ring test failed (%d).\n", r);
2093 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002094
2095 r = amdgpu_late_init(adev);
2096 if (r)
2097 return r;
2098
Alex Deucher756e6882015-10-08 00:03:36 -04002099 /* pin cursors */
2100 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2101 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2102
2103 if (amdgpu_crtc->cursor_bo) {
2104 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2105 r = amdgpu_bo_reserve(aobj, false);
2106 if (r == 0) {
2107 r = amdgpu_bo_pin(aobj,
2108 AMDGPU_GEM_DOMAIN_VRAM,
2109 &amdgpu_crtc->cursor_addr);
2110 if (r != 0)
2111 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2112 amdgpu_bo_unreserve(aobj);
2113 }
2114 }
2115 }
2116
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002117 /* blat the mode back in */
2118 if (fbcon) {
2119 drm_helper_resume_force_mode(dev);
2120 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002121 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002122 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2123 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2124 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002125 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002126 }
2127
2128 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002129
2130 /*
2131 * Most of the connector probing functions try to acquire runtime pm
2132 * refs to ensure that the GPU is powered on when connector polling is
2133 * performed. Since we're calling this from a runtime PM callback,
2134 * trying to acquire rpm refs will cause us to deadlock.
2135 *
2136 * Since we're guaranteed to be holding the rpm lock, it's safe to
2137 * temporarily disable the rpm helpers so this doesn't deadlock us.
2138 */
2139#ifdef CONFIG_PM
2140 dev->dev->power.disable_depth++;
2141#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002142 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002143#ifdef CONFIG_PM
2144 dev->dev->power.disable_depth--;
2145#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002146
2147 if (fbcon) {
2148 amdgpu_fbdev_set_suspend(adev, 0);
2149 console_unlock();
2150 }
2151
2152 return 0;
2153}
2154
Chunming Zhou63fbf422016-07-15 11:19:20 +08002155static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2156{
2157 int i;
2158 bool asic_hang = false;
2159
2160 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002161 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002162 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002163 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2164 adev->ip_blocks[i].status.hang =
2165 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2166 if (adev->ip_blocks[i].status.hang) {
2167 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002168 asic_hang = true;
2169 }
2170 }
2171 return asic_hang;
2172}
2173
Baoyou Xie4d446652016-09-18 22:09:35 +08002174static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002175{
2176 int i, r = 0;
2177
2178 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002179 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002180 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002181 if (adev->ip_blocks[i].status.hang &&
2182 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2183 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002184 if (r)
2185 return r;
2186 }
2187 }
2188
2189 return 0;
2190}
2191
Chunming Zhou35d782f2016-07-15 15:57:13 +08002192static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2193{
Alex Deucherda146d32016-10-13 16:07:03 -04002194 int i;
2195
2196 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002197 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002198 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002199 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2200 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2201 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2202 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2203 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002204 				DRM_INFO("Some blocks need a full reset!\n");
2205 return true;
2206 }
2207 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002208 }
2209 return false;
2210}
2211
2212static int amdgpu_soft_reset(struct amdgpu_device *adev)
2213{
2214 int i, r = 0;
2215
2216 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002217 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002218 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002219 if (adev->ip_blocks[i].status.hang &&
2220 adev->ip_blocks[i].version->funcs->soft_reset) {
2221 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002222 if (r)
2223 return r;
2224 }
2225 }
2226
2227 return 0;
2228}
2229
2230static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2231{
2232 int i, r = 0;
2233
2234 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002235 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002236 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002237 if (adev->ip_blocks[i].status.hang &&
2238 adev->ip_blocks[i].version->funcs->post_soft_reset)
2239 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002240 if (r)
2241 return r;
2242 }
2243
2244 return 0;
2245}
2246
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002247bool amdgpu_need_backup(struct amdgpu_device *adev)
2248{
2249 if (adev->flags & AMD_IS_APU)
2250 return false;
2251
2252 return amdgpu_lockup_timeout > 0 ? true : false;
2253}
2254
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002255static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2256 struct amdgpu_ring *ring,
2257 struct amdgpu_bo *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +01002258 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002259{
2260 uint32_t domain;
2261 int r;
2262
2263 if (!bo->shadow)
2264 return 0;
2265
2266 r = amdgpu_bo_reserve(bo, false);
2267 if (r)
2268 return r;
2269 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2270 /* if bo has been evicted, then no need to recover */
2271 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2272 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2273 NULL, fence, true);
2274 if (r) {
2275 			DRM_ERROR("failed to recover page table!\n");
2276 goto err;
2277 }
2278 }
2279err:
2280 amdgpu_bo_unreserve(bo);
2281 return r;
2282}
2283
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002284/**
2285 * amdgpu_gpu_reset - reset the asic
2286 *
2287 * @adev: amdgpu device pointer
2288 *
2289 * Attempt to reset the GPU if it has hung (all asics).
2290 * Returns 0 for success or an error on failure.
2291 */
2292int amdgpu_gpu_reset(struct amdgpu_device *adev)
2293{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002294 int i, r;
2295 int resched;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002296 bool need_full_reset;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002297
Xiangliang Yufb140b22016-12-17 22:48:57 +08002298 if (amdgpu_sriov_vf(adev))
2299 return 0;
2300
Chunming Zhou63fbf422016-07-15 11:19:20 +08002301 if (!amdgpu_check_soft_reset(adev)) {
2302 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2303 return 0;
2304 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002305
Marek Olšákd94aed52015-05-05 21:13:49 +02002306 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002307
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002308 /* block TTM */
2309 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2310
Chunming Zhou0875dc92016-06-12 15:41:58 +08002311 /* block scheduler */
2312 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2313 struct amdgpu_ring *ring = adev->rings[i];
2314
2315 if (!ring)
2316 continue;
2317 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002318 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002319 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002320 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2321 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002322
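	/* try a soft (per-IP) reset first; fall back to a full asic reset only if a hung block requires it or the soft reset fails */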
Chunming Zhou35d782f2016-07-15 15:57:13 +08002323 need_full_reset = amdgpu_need_full_reset(adev);
2324
2325 if (!need_full_reset) {
2326 amdgpu_pre_soft_reset(adev);
2327 r = amdgpu_soft_reset(adev);
2328 amdgpu_post_soft_reset(adev);
2329 if (r || amdgpu_check_soft_reset(adev)) {
2330 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2331 need_full_reset = true;
2332 }
2333 }
2334
2335 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002336 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002337
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002338retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08002339 /* Disable fb access */
2340 if (adev->mode_info.num_crtc) {
2341 struct amdgpu_mode_mc_save save;
2342 amdgpu_display_stop_mc_access(adev, &save);
2343 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2344 }
Alex Deuchere695e772016-10-19 14:40:58 -04002345 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002346 r = amdgpu_asic_reset(adev);
Alex Deuchere695e772016-10-19 14:40:58 -04002347 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002348 /* post card */
2349 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002350
Chunming Zhou35d782f2016-07-15 15:57:13 +08002351 if (!r) {
2352 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2353 r = amdgpu_resume(adev);
2354 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002355 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002356 if (!r) {
Chunming Zhoue72cfd52016-07-27 13:15:20 +08002357 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002358 if (need_full_reset && amdgpu_need_backup(adev)) {
2359 r = amdgpu_ttm_recover_gart(adev);
2360 if (r)
2361 DRM_ERROR("gart recovery failed!!!\n");
2362 }
Chunming Zhou1f465082016-06-30 15:02:26 +08002363 r = amdgpu_ib_ring_tests(adev);
2364 if (r) {
2365 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002366 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002367 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002368 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002369 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002370 		/*
2371		 * recover vm page tables, since we cannot depend on VRAM
2372		 * being consistent after a full gpu reset.
2373		 */
2374 if (need_full_reset && amdgpu_need_backup(adev)) {
2375 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2376 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002377 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002378
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002379 DRM_INFO("recover vram bo from shadow\n");
2380 mutex_lock(&adev->shadow_list_lock);
2381 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2382 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2383 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002384 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002385 if (r) {
2386 					WARN(r, "recovery from shadow isn't completed\n");
2387 break;
2388 }
2389 }
2390
Chris Wilsonf54d1862016-10-25 13:00:45 +01002391 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002392 fence = next;
2393 }
2394 mutex_unlock(&adev->shadow_list_lock);
2395 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002396 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002397 if (r)
2398 				WARN(r, "recovery from shadow isn't completed\n");
2399 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002400 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002401 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002402 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2403 struct amdgpu_ring *ring = adev->rings[i];
2404 if (!ring)
2405 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002406
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002407 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002408 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002409 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002410 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002411 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002412 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002413 if (adev->rings[i]) {
2414 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002415 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002416 }
2417 }
2418
2419 drm_helper_resume_force_mode(adev->ddev);
2420
2421 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2422 if (r) {
2423 		/* bad news, how do we tell userspace? */
2424 dev_info(adev->dev, "GPU reset failed\n");
2425 }
2426
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002427 return r;
2428}
2429
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002430void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2431{
2432 u32 mask;
2433 int ret;
2434
Alex Deuchercd474ba2016-02-04 10:21:23 -05002435 if (amdgpu_pcie_gen_cap)
2436 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2437
2438 if (amdgpu_pcie_lane_cap)
2439 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2440
2441 /* covers APUs as well */
2442 if (pci_is_root_bus(adev->pdev->bus)) {
2443 if (adev->pm.pcie_gen_mask == 0)
2444 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2445 if (adev->pm.pcie_mlw_mask == 0)
2446 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002447 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002448 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05002449
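	/* if not overridden by a module parameter, derive the supported link speeds and widths from the PCIe bus */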
2450 if (adev->pm.pcie_gen_mask == 0) {
2451 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2452 if (!ret) {
2453 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2454 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2455 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2456
2457 if (mask & DRM_PCIE_SPEED_25)
2458 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2459 if (mask & DRM_PCIE_SPEED_50)
2460 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2461 if (mask & DRM_PCIE_SPEED_80)
2462 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2463 } else {
2464 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2465 }
2466 }
2467 if (adev->pm.pcie_mlw_mask == 0) {
2468 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2469 if (!ret) {
2470 switch (mask) {
2471 case 32:
2472 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2473 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2474 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2475 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2476 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2477 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2478 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2479 break;
2480 case 16:
2481 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2482 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2483 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2484 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2485 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2486 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2487 break;
2488 case 12:
2489 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2490 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2491 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2492 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2493 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2494 break;
2495 case 8:
2496 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2497 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2498 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2499 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2500 break;
2501 case 4:
2502 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2503 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2504 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2505 break;
2506 case 2:
2507 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2508 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2509 break;
2510 case 1:
2511 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2512 break;
2513 default:
2514 break;
2515 }
2516 } else {
2517 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002518 }
2519 }
2520}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002521
2522/*
2523 * Debugfs
2524 */
2525int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04002526 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002527 unsigned nfiles)
2528{
2529 unsigned i;
2530
2531 for (i = 0; i < adev->debugfs_count; i++) {
2532 if (adev->debugfs[i].files == files) {
2533 /* Already registered */
2534 return 0;
2535 }
2536 }
2537
2538 i = adev->debugfs_count + 1;
2539 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2540 DRM_ERROR("Reached maximum number of debugfs components.\n");
2541 DRM_ERROR("Report so we increase "
2542 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2543 return -EINVAL;
2544 }
2545 adev->debugfs[adev->debugfs_count].files = files;
2546 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2547 adev->debugfs_count = i;
2548#if defined(CONFIG_DEBUG_FS)
2549 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002550 adev->ddev->primary->debugfs_root,
2551 adev->ddev->primary);
2552#endif
2553 return 0;
2554}
2555
2556static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
2557{
2558#if defined(CONFIG_DEBUG_FS)
2559 unsigned i;
2560
2561 for (i = 0; i < adev->debugfs_count; i++) {
2562 drm_debugfs_remove_files(adev->debugfs[i].files,
2563 adev->debugfs[i].num_files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002564 adev->ddev->primary);
2565 }
2566#endif
2567}
2568
2569#if defined(CONFIG_DEBUG_FS)
2570
2571static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2572 size_t size, loff_t *pos)
2573{
Al Viro45063092016-12-04 18:24:56 -05002574 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002575 ssize_t result = 0;
2576 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04002577 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04002578 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002579
2580 if (size & 0x3 || *pos & 0x3)
2581 return -EINVAL;
2582
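	/*
	 * The file offset encodes more than the register address:
	 *   bits  0..17  register byte offset (dword aligned)
	 *   bit  23      take the PM mutex around the access
	 *   bits 24..33  SE index       (0x3FF selects broadcast)
	 *   bits 34..43  SH index       (0x3FF selects broadcast)
	 *   bits 44..53  instance index (0x3FF selects broadcast)
	 *   bit  62      apply the SE/SH/instance selection above
	 */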
Tom St Denisbd122672016-07-28 09:39:22 -04002583 /* are we reading registers for which a PG lock is necessary? */
2584 pm_pg_lock = (*pos >> 23) & 1;
2585
Tom St Denis566281592016-06-27 11:55:07 -04002586 if (*pos & (1ULL << 62)) {
2587 se_bank = (*pos >> 24) & 0x3FF;
2588 sh_bank = (*pos >> 34) & 0x3FF;
2589 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04002590
2591 if (se_bank == 0x3FF)
2592 se_bank = 0xFFFFFFFF;
2593 if (sh_bank == 0x3FF)
2594 sh_bank = 0xFFFFFFFF;
2595 if (instance_bank == 0x3FF)
2596 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04002597 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04002598 } else {
2599 use_bank = 0;
2600 }
2601
Tom St Denisbd122672016-07-28 09:39:22 -04002602 *pos &= 0x3FFFF;
2603
Tom St Denis566281592016-06-27 11:55:07 -04002604 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04002605 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2606 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04002607 return -EINVAL;
2608 mutex_lock(&adev->grbm_idx_mutex);
2609 amdgpu_gfx_select_se_sh(adev, se_bank,
2610 sh_bank, instance_bank);
2611 }
2612
Tom St Denisbd122672016-07-28 09:39:22 -04002613 if (pm_pg_lock)
2614 mutex_lock(&adev->pm.mutex);
2615
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002616 while (size) {
2617 uint32_t value;
2618
2619 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04002620 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002621
2622 value = RREG32(*pos >> 2);
2623 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04002624 if (r) {
2625 result = r;
2626 goto end;
2627 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002628
2629 result += 4;
2630 buf += 4;
2631 *pos += 4;
2632 size -= 4;
2633 }
2634
Tom St Denis566281592016-06-27 11:55:07 -04002635end:
2636 if (use_bank) {
2637 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2638 mutex_unlock(&adev->grbm_idx_mutex);
2639 }
2640
Tom St Denisbd122672016-07-28 09:39:22 -04002641 if (pm_pg_lock)
2642 mutex_unlock(&adev->pm.mutex);
2643
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002644 return result;
2645}
2646
2647static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2648 size_t size, loff_t *pos)
2649{
Al Viro45063092016-12-04 18:24:56 -05002650 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002651 ssize_t result = 0;
2652 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04002653 bool pm_pg_lock, use_bank;
2654 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002655
2656 if (size & 0x3 || *pos & 0x3)
2657 return -EINVAL;
2658
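	/* the write path decodes the same bank/PG-lock offset encoding as amdgpu_debugfs_regs_read() */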
Tom St Denis394fdde2016-10-10 07:31:23 -04002659 	/* are we writing registers for which a PG lock is necessary? */
2660 pm_pg_lock = (*pos >> 23) & 1;
2661
2662 if (*pos & (1ULL << 62)) {
2663 se_bank = (*pos >> 24) & 0x3FF;
2664 sh_bank = (*pos >> 34) & 0x3FF;
2665 instance_bank = (*pos >> 44) & 0x3FF;
2666
2667 if (se_bank == 0x3FF)
2668 se_bank = 0xFFFFFFFF;
2669 if (sh_bank == 0x3FF)
2670 sh_bank = 0xFFFFFFFF;
2671 if (instance_bank == 0x3FF)
2672 instance_bank = 0xFFFFFFFF;
2673 use_bank = 1;
2674 } else {
2675 use_bank = 0;
2676 }
2677
2678 *pos &= 0x3FFFF;
2679
2680 if (use_bank) {
2681 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2682 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2683 return -EINVAL;
2684 mutex_lock(&adev->grbm_idx_mutex);
2685 amdgpu_gfx_select_se_sh(adev, se_bank,
2686 sh_bank, instance_bank);
2687 }
2688
2689 if (pm_pg_lock)
2690 mutex_lock(&adev->pm.mutex);
2691
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002692 while (size) {
2693 uint32_t value;
2694
2695 if (*pos > adev->rmmio_size)
2696 			goto end;
2697
2698 r = get_user(value, (uint32_t *)buf);
2699 		if (r) {
2700 			result = r;
			goto end;
		}
2701
2702 WREG32(*pos >> 2, value);
2703
2704 result += 4;
2705 buf += 4;
2706 *pos += 4;
2707 size -= 4;
2708 }
2709
end:
Tom St Denis394fdde2016-10-10 07:31:23 -04002710 	if (use_bank) {
2711 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2712 mutex_unlock(&adev->grbm_idx_mutex);
2713 }
2714
2715 if (pm_pg_lock)
2716 mutex_unlock(&adev->pm.mutex);
2717
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002718 return result;
2719}
2720
Tom St Denisadcec282016-04-15 13:08:44 -04002721static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2722 size_t size, loff_t *pos)
2723{
Al Viro45063092016-12-04 18:24:56 -05002724 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04002725 ssize_t result = 0;
2726 int r;
2727
2728 if (size & 0x3 || *pos & 0x3)
2729 return -EINVAL;
2730
2731 while (size) {
2732 uint32_t value;
2733
2734 value = RREG32_PCIE(*pos >> 2);
2735 r = put_user(value, (uint32_t *)buf);
2736 if (r)
2737 return r;
2738
2739 result += 4;
2740 buf += 4;
2741 *pos += 4;
2742 size -= 4;
2743 }
2744
2745 return result;
2746}
2747
2748static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2749 size_t size, loff_t *pos)
2750{
Al Viro45063092016-12-04 18:24:56 -05002751 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04002752 ssize_t result = 0;
2753 int r;
2754
2755 if (size & 0x3 || *pos & 0x3)
2756 return -EINVAL;
2757
2758 while (size) {
2759 uint32_t value;
2760
2761 r = get_user(value, (uint32_t *)buf);
2762 if (r)
2763 return r;
2764
2765 WREG32_PCIE(*pos >> 2, value);
2766
2767 result += 4;
2768 buf += 4;
2769 *pos += 4;
2770 size -= 4;
2771 }
2772
2773 return result;
2774}
2775
2776static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2777 size_t size, loff_t *pos)
2778{
Al Viro45063092016-12-04 18:24:56 -05002779 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04002780 ssize_t result = 0;
2781 int r;
2782
2783 if (size & 0x3 || *pos & 0x3)
2784 return -EINVAL;
2785
2786 while (size) {
2787 uint32_t value;
2788
2789 value = RREG32_DIDT(*pos >> 2);
2790 r = put_user(value, (uint32_t *)buf);
2791 if (r)
2792 return r;
2793
2794 result += 4;
2795 buf += 4;
2796 *pos += 4;
2797 size -= 4;
2798 }
2799
2800 return result;
2801}
2802
2803static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2804 size_t size, loff_t *pos)
2805{
Al Viro45063092016-12-04 18:24:56 -05002806 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04002807 ssize_t result = 0;
2808 int r;
2809
2810 if (size & 0x3 || *pos & 0x3)
2811 return -EINVAL;
2812
2813 while (size) {
2814 uint32_t value;
2815
2816 r = get_user(value, (uint32_t *)buf);
2817 if (r)
2818 return r;
2819
2820 WREG32_DIDT(*pos >> 2, value);
2821
2822 result += 4;
2823 buf += 4;
2824 *pos += 4;
2825 size -= 4;
2826 }
2827
2828 return result;
2829}
2830
2831static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2832 size_t size, loff_t *pos)
2833{
Al Viro45063092016-12-04 18:24:56 -05002834 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04002835 ssize_t result = 0;
2836 int r;
2837
2838 if (size & 0x3 || *pos & 0x3)
2839 return -EINVAL;
2840
2841 while (size) {
2842 uint32_t value;
2843
Tom St Denis6fc0dea2016-08-29 08:39:29 -04002844 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04002845 r = put_user(value, (uint32_t *)buf);
2846 if (r)
2847 return r;
2848
2849 result += 4;
2850 buf += 4;
2851 *pos += 4;
2852 size -= 4;
2853 }
2854
2855 return result;
2856}
2857
2858static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
2859 size_t size, loff_t *pos)
2860{
Al Viro45063092016-12-04 18:24:56 -05002861 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04002862 ssize_t result = 0;
2863 int r;
2864
2865 if (size & 0x3 || *pos & 0x3)
2866 return -EINVAL;
2867
2868 while (size) {
2869 uint32_t value;
2870
2871 r = get_user(value, (uint32_t *)buf);
2872 if (r)
2873 return r;
2874
Tom St Denis6fc0dea2016-08-29 08:39:29 -04002875 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04002876
2877 result += 4;
2878 buf += 4;
2879 *pos += 4;
2880 size -= 4;
2881 }
2882
2883 return result;
2884}
2885
Tom St Denis1e051412016-06-27 09:57:18 -04002886static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2887 size_t size, loff_t *pos)
2888{
Al Viro45063092016-12-04 18:24:56 -05002889 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04002890 ssize_t result = 0;
2891 int r;
2892 uint32_t *config, no_regs = 0;
2893
2894 if (size & 0x3 || *pos & 0x3)
2895 return -EINVAL;
2896
Markus Elfringecab7662016-09-18 17:00:52 +02002897 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04002898 if (!config)
2899 return -ENOMEM;
2900
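	/* the file is a flat array of dwords; the first dword is a format version userspace can key off */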
2901 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05002902 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04002903 config[no_regs++] = adev->gfx.config.max_shader_engines;
2904 config[no_regs++] = adev->gfx.config.max_tile_pipes;
2905 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
2906 config[no_regs++] = adev->gfx.config.max_sh_per_se;
2907 config[no_regs++] = adev->gfx.config.max_backends_per_se;
2908 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
2909 config[no_regs++] = adev->gfx.config.max_gprs;
2910 config[no_regs++] = adev->gfx.config.max_gs_threads;
2911 config[no_regs++] = adev->gfx.config.max_hw_contexts;
2912 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
2913 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
2914 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
2915 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
2916 config[no_regs++] = adev->gfx.config.num_tile_pipes;
2917 config[no_regs++] = adev->gfx.config.backend_enable_mask;
2918 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
2919 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
2920 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
2921 config[no_regs++] = adev->gfx.config.num_gpus;
2922 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
2923 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
2924 config[no_regs++] = adev->gfx.config.gb_addr_config;
2925 config[no_regs++] = adev->gfx.config.num_rbs;
2926
Tom St Denis89a8f302016-08-12 15:14:31 -04002927 /* rev==1 */
2928 config[no_regs++] = adev->rev_id;
2929 config[no_regs++] = adev->pg_flags;
2930 config[no_regs++] = adev->cg_flags;
2931
Tom St Denise9f11dc2016-08-17 12:00:51 -04002932 /* rev==2 */
2933 config[no_regs++] = adev->family;
2934 config[no_regs++] = adev->external_rev_id;
2935
Tom St Denis9a999352017-01-18 13:01:25 -05002936 /* rev==3 */
2937 config[no_regs++] = adev->pdev->device;
2938 config[no_regs++] = adev->pdev->revision;
2939 config[no_regs++] = adev->pdev->subsystem_device;
2940 config[no_regs++] = adev->pdev->subsystem_vendor;
2941
Tom St Denis1e051412016-06-27 09:57:18 -04002942 while (size && (*pos < no_regs * 4)) {
2943 uint32_t value;
2944
2945 value = config[*pos >> 2];
2946 r = put_user(value, (uint32_t *)buf);
2947 if (r) {
2948 kfree(config);
2949 return r;
2950 }
2951
2952 result += 4;
2953 buf += 4;
2954 *pos += 4;
2955 size -= 4;
2956 }
2957
2958 kfree(config);
2959 return result;
2960}
2961
Tom St Denisf2cdaf22016-09-15 10:08:44 -04002962static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
2963 size_t size, loff_t *pos)
2964{
Al Viro45063092016-12-04 18:24:56 -05002965 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04002966 int idx, r;
2967 int32_t value;
2968
2969 if (size != 4 || *pos & 0x3)
2970 return -EINVAL;
2971
2972 /* convert offset to sensor number */
2973 idx = *pos >> 2;
2974
2975 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
2976 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
2977 else
2978 return -EINVAL;
2979
2980 if (!r)
2981 r = put_user(value, (int32_t *)buf);
2982
2983 return !r ? 4 : r;
2984}
Tom St Denis1e051412016-06-27 09:57:18 -04002985
Tom St Denis273d7aa2016-10-11 14:48:55 -04002986static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
2987 size_t size, loff_t *pos)
2988{
2989 struct amdgpu_device *adev = f->f_inode->i_private;
2990 int r, x;
2991 	ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04002992 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04002993
2994 if (size & 3 || *pos & 3)
2995 return -EINVAL;
2996
2997 /* decode offset */
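	/*
	 * The low 7 bits of the file offset are a byte offset into the wave
	 * data returned by read_wave_data(); the remaining fields select the
	 * SE/SH/CU, wave and SIMD to query.
	 */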
2998 offset = (*pos & 0x7F);
2999 se = ((*pos >> 7) & 0xFF);
3000 sh = ((*pos >> 15) & 0xFF);
3001 cu = ((*pos >> 23) & 0xFF);
3002 wave = ((*pos >> 31) & 0xFF);
3003 simd = ((*pos >> 37) & 0xFF);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003004
3005 /* switch to the specific se/sh/cu */
3006 mutex_lock(&adev->grbm_idx_mutex);
3007 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3008
3009 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003010 if (adev->gfx.funcs->read_wave_data)
3011 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003012
3013 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3014 mutex_unlock(&adev->grbm_idx_mutex);
3015
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003016 if (!x)
3017 return -EINVAL;
3018
Tom St Denis472259f2016-10-14 09:49:09 -04003019 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003020 uint32_t value;
3021
Tom St Denis472259f2016-10-14 09:49:09 -04003022 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003023 r = put_user(value, (uint32_t *)buf);
3024 if (r)
3025 return r;
3026
3027 result += 4;
3028 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003029 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003030 size -= 4;
3031 }
3032
3033 return result;
3034}
3035
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003036static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3037 size_t size, loff_t *pos)
3038{
3039 	struct amdgpu_device *adev = file_inode(f)->i_private;
3040 int r;
3041 ssize_t result = 0;
3042 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3043
3044 	if (size & 3 || *pos & 3 || size > 4096) /* bound reads to the 1024-dword buffer below */
3045 return -EINVAL;
3046
3047 /* decode offset */
3048 offset = (*pos & 0xFFF); /* in dwords */
3049 se = ((*pos >> 12) & 0xFF);
3050 sh = ((*pos >> 20) & 0xFF);
3051 cu = ((*pos >> 28) & 0xFF);
3052 wave = ((*pos >> 36) & 0xFF);
3053 simd = ((*pos >> 44) & 0xFF);
3054 thread = ((*pos >> 52) & 0xFF);
3055 bank = ((*pos >> 60) & 1);
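	/*
	 * Same scheme as the wave file above: the pread() offset packs the
	 * selector fields, and bit 60 picks the register bank read below,
	 * 0 for VGPRs and 1 for SGPRs.
	 */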
3056
3057 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3058 if (!data)
3059 return -ENOMEM;
3060
3061 /* switch to the specific se/sh/cu */
3062 mutex_lock(&adev->grbm_idx_mutex);
3063 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3064
3065 	if (bank == 0) {
3066 		if (adev->gfx.funcs->read_wave_vgprs)
3067 			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
3068 	} else {
3069 		if (adev->gfx.funcs->read_wave_sgprs)
3070 			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
3071 	}
3072
3073 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3074 mutex_unlock(&adev->grbm_idx_mutex);
3075
3076 while (size) {
3077 uint32_t value;
3078
3079 		value = data[result >> 2]; /* data[] is filled from index 0, not from the requested offset */
3080 r = put_user(value, (uint32_t *)buf);
3081 if (r) {
3082 result = r;
3083 goto err;
3084 }
3085
3086 result += 4;
3087 buf += 4;
3088 size -= 4;
3089 }
3090
3091err:
3092 kfree(data);
3093 return result;
3094}
3095
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003096static const struct file_operations amdgpu_debugfs_regs_fops = {
3097 .owner = THIS_MODULE,
3098 .read = amdgpu_debugfs_regs_read,
3099 .write = amdgpu_debugfs_regs_write,
3100 .llseek = default_llseek
3101};
Tom St Denisadcec282016-04-15 13:08:44 -04003102static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3103 .owner = THIS_MODULE,
3104 .read = amdgpu_debugfs_regs_didt_read,
3105 .write = amdgpu_debugfs_regs_didt_write,
3106 .llseek = default_llseek
3107};
3108static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3109 .owner = THIS_MODULE,
3110 .read = amdgpu_debugfs_regs_pcie_read,
3111 .write = amdgpu_debugfs_regs_pcie_write,
3112 .llseek = default_llseek
3113};
3114static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3115 .owner = THIS_MODULE,
3116 .read = amdgpu_debugfs_regs_smc_read,
3117 .write = amdgpu_debugfs_regs_smc_write,
3118 .llseek = default_llseek
3119};
3120
Tom St Denis1e051412016-06-27 09:57:18 -04003121static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3122 .owner = THIS_MODULE,
3123 .read = amdgpu_debugfs_gca_config_read,
3124 .llseek = default_llseek
3125};
3126
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003127static const struct file_operations amdgpu_debugfs_sensors_fops = {
3128 .owner = THIS_MODULE,
3129 .read = amdgpu_debugfs_sensor_read,
3130 .llseek = default_llseek
3131};
3132
Tom St Denis273d7aa2016-10-11 14:48:55 -04003133static const struct file_operations amdgpu_debugfs_wave_fops = {
3134 .owner = THIS_MODULE,
3135 .read = amdgpu_debugfs_wave_read,
3136 .llseek = default_llseek
3137};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003138static const struct file_operations amdgpu_debugfs_gpr_fops = {
3139 .owner = THIS_MODULE,
3140 .read = amdgpu_debugfs_gpr_read,
3141 .llseek = default_llseek
3142};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003143
Tom St Denisadcec282016-04-15 13:08:44 -04003144static const struct file_operations *debugfs_regs[] = {
3145 &amdgpu_debugfs_regs_fops,
3146 &amdgpu_debugfs_regs_didt_fops,
3147 &amdgpu_debugfs_regs_pcie_fops,
3148 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003149 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003150 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003151 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003152 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003153};
3154
3155static const char *debugfs_regs_names[] = {
3156 "amdgpu_regs",
3157 "amdgpu_regs_didt",
3158 "amdgpu_regs_pcie",
3159 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003160 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003161 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003162 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003163 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003164};
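/*
 * debugfs_regs[] and debugfs_regs_names[] are parallel arrays: entries must
 * be added to both at the same index so amdgpu_debugfs_regs_init() pairs each
 * name with the right file_operations.
 */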
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003165
3166static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3167{
3168 struct drm_minor *minor = adev->ddev->primary;
3169 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003170 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003171
Tom St Denisadcec282016-04-15 13:08:44 -04003172 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3173 ent = debugfs_create_file(debugfs_regs_names[i],
3174 S_IFREG | S_IRUGO, root,
3175 adev, debugfs_regs[i]);
3176 if (IS_ERR(ent)) {
3177 for (j = 0; j < i; j++) {
3178 				debugfs_remove(adev->debugfs_regs[j]);
3179 				adev->debugfs_regs[j] = NULL;
3180 }
3181 return PTR_ERR(ent);
3182 }
3183
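		/*
		 * Only the first entry, amdgpu_regs, gets a file size: it maps
		 * the whole MMIO aperture, so report rmmio_size to readers.
		 */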
3184 if (!i)
3185 i_size_write(ent->d_inode, adev->rmmio_size);
3186 adev->debugfs_regs[i] = ent;
3187 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003188
3189 return 0;
3190}
3191
3192static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3193{
Tom St Denisadcec282016-04-15 13:08:44 -04003194 unsigned i;
3195
3196 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3197 if (adev->debugfs_regs[i]) {
3198 debugfs_remove(adev->debugfs_regs[i]);
3199 adev->debugfs_regs[i] = NULL;
3200 }
3201 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003202}
3203
3204int amdgpu_debugfs_init(struct drm_minor *minor)
3205{
3206 return 0;
3207}
3208
3209void amdgpu_debugfs_cleanup(struct drm_minor *minor)
3210{
3211}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003212#else
3213static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3214{
3215 return 0;
3216}
3217static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003218#endif