/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

65static const char *amdgpu_asic_name[] = {
Ken Wangda69c1612016-01-21 19:08:55 +080066 "TAHITI",
67 "PITCAIRN",
68 "VERDE",
69 "OLAND",
70 "HAINAN",
Alex Deucherd38ceaf2015-04-20 16:55:21 -040071 "BONAIRE",
72 "KAVERI",
73 "KABINI",
74 "HAWAII",
75 "MULLINS",
76 "TOPAZ",
77 "TONGA",
David Zhang48299f92015-07-08 01:05:16 +080078 "FIJI",
Alex Deucherd38ceaf2015-04-20 16:55:21 -040079 "CARRIZO",
Samuel Li139f4912015-10-08 14:50:27 -040080 "STONEY",
Flora Cui2cc0c0b2016-03-14 18:33:29 -040081 "POLARIS10",
82 "POLARIS11",
Junwei Zhangc4642a42016-12-14 15:32:28 -050083 "POLARIS12",
Ken Wangd4196f02016-03-09 09:28:32 +080084 "VEGA10",
Chunming Zhou2ca8a5d2016-12-07 17:31:19 +080085 "RAVEN",
Alex Deucherd38ceaf2015-04-20 16:55:21 -040086 "LAST",
87};
88
89bool amdgpu_device_is_px(struct drm_device *dev)
90{
91 struct amdgpu_device *adev = dev->dev_private;
92
Jammy Zhou2f7d10b2015-07-22 11:29:01 +080093 if (adev->flags & AMD_IS_PX)
Alex Deucherd38ceaf2015-04-20 16:55:21 -040094 return true;
95 return false;
96}
97
/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

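/*
 * Usage sketch (illustrative, not part of the driver): most callers go
 * through the RREG32()/WREG32() macros from amdgpu.h, which wrap the two
 * helpers above, e.g. a read-modify-write of a register:
 *
 *	u32 tmp = RREG32(mmSOME_REG);	// hypothetical register name
 *	tmp |= SOME_ENABLE_BIT;		// hypothetical bit
 *	WREG32(mmSOME_REG, tmp);
 *
 * Registers beyond rmmio_size fall back to the MM_INDEX/MM_DATA window,
 * which is why the slow path above takes mmio_idx_lock.
 */
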
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

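/*
 * Usage sketch (illustrative): ring code typically publishes its write
 * pointer through these helpers, e.g.:
 *
 *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *			    lower_32_bits(ring->wptr));
 *
 * On VEGA10+ the 64-bit variant is used instead so the whole wptr is
 * written atomically; ring->doorbell_index is assumed here to have been
 * assigned by the owning IP block during ring init.
 */
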
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

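/*
 * Example (illustrative): a golden-register table is a flat array of
 * (offset, and_mask, or_mask) triples, so array_size must be a multiple
 * of 3. A hypothetical entry that clears bits 0xf and sets bit 0x2:
 *
 *	static const u32 fake_golden_settings[] = {
 *		mmSOME_REG, 0x0000000f, 0x00000002,	// hypothetical register
 *	};
 *	amdgpu_program_register_sequence(adev, fake_golden_settings,
 *					 ARRAY_SIZE(fake_golden_settings));
 *
 * An and_mask of 0xffffffff skips the read and writes or_mask directly.
 */
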
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

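/*
 * Worked example (numbers are hypothetical): with an 8 MB doorbell BAR and
 * num_doorbells = 1024, amdgpu owns the first 1024 * 4 = 4096 bytes, so
 * amdkfd is reported aperture_base = doorbell.base, aperture_size = 8 MB,
 * start_offset = 4096, and carves its queues out of the remainder.
 */
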
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64-bit wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);

	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a 64-bit wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

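/*
 * Usage sketch (illustrative): a ring that needs a fence/rptr slot grabs a
 * writeback entry, derives CPU and GPU addresses from it, and returns it on
 * teardown:
 *
 *	u32 wb;
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *		// ... point the ring's rptr/fence at gpu_addr/cpu_ptr ...
 *		amdgpu_wb_free(adev, wb);
 *	}
 *
 * The 64-bit variants hand out two adjacent slots for Qword writebacks.
 */
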
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

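/*
 * Worked example (hypothetical values): with mc_mask = 40 bits and 8 GB of
 * VRAM placed at base 0, vram_start = 0x0 and vram_end = 0x1ffffffff.
 * amdgpu_gtt_location() then compares the space below vram_start (none)
 * with the space above vram_end and places GTT after VRAM, i.e.
 * gtt_start = 0x200000000 rounded up per gtt_base_align.
 */
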
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after VM reboot some old SMC firmware still needs the driver
		 * to do vPost, otherwise the GPU hangs. SMC firmware versions
		 * above 22.15 don't have this flaw, so we force vPost only for
		 * SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

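/*
 * Worked example of the bounds above (hypothetical values): amdgpu_vm_size
 * is in GB, so vm_size = 64 covers 64 << 30 bytes = 2^24 4 KB pages. With
 * amdgpu_vm_block_size = 9 each page table maps 2^9 pages, leaving
 * 2^24 / 2^9 = 32768 page-directory entries. The "too large" check in
 * amdgpu_check_block_size() (vm_size * 1024 < 1 << block_size) rejects
 * block sizes that would leave fewer than 256 directory entries.
 */
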
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

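/*
 * Sketch of how this ops table is hooked up (based on the usual
 * vga_switcheroo flow; the actual registration lives later in this file):
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops,
 *				       amdgpu_device_is_px(adev->ddev));
 *
 * The third argument tells vga_switcheroo whether runtime power control
 * is used for this (PX) device.
 */
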
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

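/*
 * Usage sketch (illustrative): IP code can gate features on a minimum
 * block version, e.g. "SMC 7.1 or newer":
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0) {
 *		// the SMC block is version >= 7.1
 *	}
 */
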
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

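/*
 * Example (illustrative): the amdgpu_virtual_display module parameter parsed
 * above is a semicolon-separated list of "<pci address>,<crtc count>"
 * entries, e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *
 * or "all,1" to enable one virtual crtc on every device (the crtc count is
 * clamped to 1..6).
 */
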
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const struct firmware *fw;
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	release_firmware(fw);
	fw = NULL;

	return err;
}

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

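/**
 * amdgpu_init - run sw_init and hw_init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * The first pass runs sw_init on every valid IP block; the GMC block is
 * also hw-initialized in that pass so GPU memory (VRAM scratch, writeback,
 * and the SR-IOV CSA) can be allocated early. The second pass runs hw_init
 * on the remaining blocks. Returns 0 for success or an error on failure.
 */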
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

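/*
 * VRAM-loss detection: after init/resume a copy of the first
 * AMDGPU_RESET_MAGIC_NUM bytes behind adev->gart.ptr is cached in
 * adev->reset_magic. If the cached copy no longer matches after an ASIC
 * reset, VRAM contents were lost and buffers have to be restored from
 * their shadows.
 */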
static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

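/**
 * amdgpu_late_set_cg_state - enable clockgating on all eligible IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Runs from the delayed late_init_work so that clockgating is only enabled
 * once the IB tests have finished. UVD and VCE are handled specially
 * elsewhere and are skipped here. Returns 0 for success or an error on
 * failure.
 */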
static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

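/**
 * amdgpu_late_init - run late init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Runs each block's late_init callback, schedules the delayed work that
 * enables clockgating AMDGPU_RESUME_MS milliseconds later, and caches the
 * reset magic used to detect VRAM loss across GPU resets.
 * Returns 0 for success or an error on failure.
 */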
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_fill_reset_magic(adev);

	return 0;
}

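/**
 * amdgpu_fini - tear down all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Runs hw_fini, sw_fini and late_fini on the IP blocks in reverse init
 * order. The SMC block is shut down first (after ungating its clocks) so
 * that the remaining blocks can be brought down safely.
 */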
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}

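/* Delayed work handler: runs amdgpu_late_set_cg_state once the
 * AMDGPU_RESUME_MS delay scheduled in amdgpu_late_init has elapsed. */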
static void amdgpu_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_late_set_cg_state(adev);
}

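/**
 * amdgpu_suspend - suspend all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Ungates the SMC block first, then walks the remaining IP blocks in
 * reverse order, ungating each one (so it can be shut down cleanly)
 * before calling its suspend callback. Under SR-IOV, full GPU access is
 * requested from the host for the duration. Returns 0.
 */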
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

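/*
 * SR-IOV re-initialization is split into two phases around the GART
 * recovery: the early phase brings up the memory path (GMC, COMMON,
 * GFXHUB, MMHUB, IH) so the GART can be restored, and the late phase
 * re-inits the engines (SMC, DCE, GFX, SDMA, VCE) that depend on it.
 */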
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_GFXHUB,
		AMD_IP_BLOCK_TYPE_MMHUB,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name,
				 r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_VCE,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name,
				 r ? "failed" : "succeeded");
		}
	}

	return 0;
}

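/*
 * Resume is likewise split in two: phase 1 restores COMMON, GMC and IH so
 * that memory access and interrupts work, which lets the GPU-reset path
 * recover the GART (and check for VRAM loss) before phase 2 resumes the
 * remaining blocks.
 */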
static int amdgpu_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_resume_phase2(adev);

	return r;
}

static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gtt_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can call these functions again later without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);

	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA card, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	if (!adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			goto failed;
		}
		/* init i2c buses */
		amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if (amdgpu_testing & 1) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		goto failed;
	}

	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the hw into a low power (D3hot) state,
 *           false to reset it instead
 * @fbcon: suspend the fbdev console state as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	if (adev->is_atom_fw)
		amdgpu_atomfirmware_scratch_regs_save(adev);
	else
		amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable and restore the PCI device state first
 * @fbcon: resume the fbdev console state as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}
	if (adev->is_atom_fw)
		amdgpu_atomfirmware_scratch_regs_restore(adev);
	else
		amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

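/*
 * Per-IP soft reset runs in three steps: amdgpu_check_soft_reset marks the
 * hung blocks, then pre_soft_reset/soft_reset/post_soft_reset act only on
 * the blocks flagged as hung. A hang in GMC, SMC, ACP or DCE cannot be
 * recovered this way and escalates to a full ASIC reset.
 */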
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0;
}

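/**
 * amdgpu_recover_vram_from_shadow - restore one BO from its GTT shadow
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the copy on
 * @bo: VRAM buffer object to restore
 * @fence: fence signalled when the copy completes
 *
 * After a full reset VRAM contents may be lost; buffers that keep a
 * shadow copy in GTT (such as VM page tables) are copied back here. BOs
 * that were evicted from VRAM need no recovery and are skipped.
 */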
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
		if (r) {
			DRM_ERROR("%p bind failed\n", bo->shadow);
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @job: the job that triggered the hang, or NULL
 *
 * Attempt to reset the GPU if it has hung (all asics),
 * for the SR-IOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
{
	int i, j, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* we start from the ring that triggered the GPU hang */
	j = job ? job->ring->idx : 0;

	/* block scheduler */
	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);

		if (job && j != i)
			continue;

		/* here we give the job a last chance to be removed from the
		 * mirror list, since we already spent some time in kthread_park */
		if (job && list_empty(&job->base.node)) {
			kthread_unpark(ring->sched.thread);
			goto give_up_reset;
		}

		if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
			amd_sched_job_kickout(&job->base);

		/* only do job_reset on the hang ring if @job not NULL */
		amd_sched_hw_job_reset(&ring->sched);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion_ring(ring);
	}

	/* request to take full control of GPU before re-initialization */
	if (job)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover the GART prior to resuming SMC/CP/SDMA */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

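	/*
	 * The shadow recovery below is pipelined: each iteration issues the
	 * copy for one BO and waits on the fence of the previous one, so the
	 * copies overlap with the waits instead of running strictly serially.
	 */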
	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		if (job && j != i) {
			kthread_unpark(ring->sched.thread);
			continue;
		}

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
give_up_reset:
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset, vram_lost = false;

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fall back to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

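	/*
	 * Full reset: save the BIOS scratch registers, reset the ASIC and
	 * re-post the card, then resume in two phases with a GART recovery
	 * and a VRAM-loss check in between; an IB test failure afterwards
	 * jumps back here.
	 */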
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002846retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08002847 /* Disable fb access */
2848 if (adev->mode_info.num_crtc) {
2849 struct amdgpu_mode_mc_save save;
2850 amdgpu_display_stop_mc_access(adev, &save);
2851 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2852 }
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002853 if (adev->is_atom_fw)
2854 amdgpu_atomfirmware_scratch_regs_save(adev);
2855 else
2856 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002857 r = amdgpu_asic_reset(adev);
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002858 if (adev->is_atom_fw)
2859 amdgpu_atomfirmware_scratch_regs_restore(adev);
2860 else
2861 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002862 /* post card */
2863 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002864
Chunming Zhou35d782f2016-07-15 15:57:13 +08002865 if (!r) {
2866 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Chunming Zhoufcf06492017-05-05 10:33:33 +08002867 r = amdgpu_resume_phase1(adev);
2868 if (r)
2869 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002870 vram_lost = amdgpu_check_vram_lost(adev);
Chunming Zhouf1892132017-05-15 16:48:27 +08002871 if (vram_lost) {
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002872 DRM_ERROR("VRAM is lost!\n");
Chunming Zhouf1892132017-05-15 16:48:27 +08002873 atomic_inc(&adev->vram_lost_counter);
2874 }
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002875 r = amdgpu_ttm_recover_gart(adev);
2876 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002877 goto out;
2878 r = amdgpu_resume_phase2(adev);
2879 if (r)
2880 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002881 if (vram_lost)
2882 amdgpu_fill_reset_magic(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002883 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08002884 }
2885out:
2886 if (!r) {
2887 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002888 r = amdgpu_ib_ring_tests(adev);
2889 if (r) {
2890 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002891 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002892 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002893 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002894 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002895 /**
2896 * recovery vm page tables, since we cannot depend on VRAM is
2897 * consistent after gpu full reset.
2898 */
2899 if (need_full_reset && amdgpu_need_backup(adev)) {
2900 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2901 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002902 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002903
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002904 DRM_INFO("recover vram bo from shadow\n");
2905 mutex_lock(&adev->shadow_list_lock);
2906 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002907 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002908 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2909 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002910 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002911 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08002912 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002913 break;
2914 }
2915 }
2916
Chris Wilsonf54d1862016-10-25 13:00:45 +01002917 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002918 fence = next;
2919 }
2920 mutex_unlock(&adev->shadow_list_lock);
2921 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002922 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002923 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08002924 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002925 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002926 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002927 }
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i] && adev->rings[i]->sched.thread) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r)
		/* bad news, how do we tell it to userspace? */
		dev_info(adev->dev, "GPU reset failed\n");
	else
		dev_info(adev->dev, "GPU reset succeeded!\n");

	return r;
}

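/**
 * amdgpu_get_pcie_info - determine the PCIe gen and lane width masks
 *
 * @adev: amdgpu device pointer
 *
 * Honors the amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap module
 * parameters if set, falls back to sane defaults for devices on a root
 * bus (which covers APUs), and otherwise queries the DRM PCIe helpers
 * for the platform's supported link speeds and maximum link width.
 */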
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

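/*
 * The amdgpu_regs file encodes more than a plain MMIO offset in the
 * file position (this layout is derived from the decoding below):
 *
 *   bits  0..21: byte offset into the MMIO space (must be dword aligned)
 *   bit      23: take the power-gating (pm) mutex around the access
 *   bits 24..33: shader engine (SE) index
 *   bits 34..43: shader array (SH) index
 *   bits 44..53: compute unit/instance index
 *   bit      62: apply the SE/SH/instance bank selection above
 *
 * An index value of 0x3FF selects broadcast (all instances).
 */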
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

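/*
 * Writes use the same file-position encoding as
 * amdgpu_debugfs_regs_read() above.
 */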
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

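/*
 * The next three pairs of file operations expose the indirect PCIE,
 * DIDT and SMC register spaces via the RREG32_PCIE/WREG32_PCIE,
 * RREG32_DIDT/WREG32_DIDT and RREG32_SMC/WREG32_SMC accessors. The file
 * position is the register offset with no bank encoding; note that the
 * PCIE and DIDT accessors take a dword index (*pos >> 2) while the SMC
 * accessors take the byte offset as-is.
 */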
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

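/*
 * amdgpu_gca_config exposes a versioned snapshot of the gfx
 * configuration as an array of dwords: config[0] is the layout version,
 * and each version appends its fields after those of the previous one.
 */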
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

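/*
 * amdgpu_sensors reads a power/thermal sensor through the powerplay (or
 * legacy dpm) read_sensor callback; the file position selects the
 * sensor index and the result comes back as one or more 32-bit values.
 */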
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}

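/*
 * amdgpu_wave reads back wave status data; the file position packs the
 * target (this layout is derived from the decoding below):
 *
 *   offset = pos[6:0]   (dword offset into the returned data)
 *   se     = pos[14:7]
 *   sh     = pos[22:15]
 *   cu     = pos[30:23]
 *   wave   = pos[38:31]
 *   simd   = pos[44:37]
 *
 * Note that the wave and simd fields overlap as decoded here.
 */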
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}

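/*
 * amdgpu_gpr reads wave VGPRs or SGPRs; the file position packs the
 * target (this layout is derived from the decoding below):
 *
 *   offset = pos[11:0]  (dword offset into the register file)
 *   se     = pos[19:12]
 *   sh     = pos[27:20]
 *   cu     = pos[35:28]
 *   wave   = pos[43:36]
 *   simd   = pos[51:44]
 *   thread = pos[59:52]
 *   bank   = pos[60]    (0 = VGPRs, 1 = SGPRs)
 */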
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};

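/*
 * The files above are created under the DRM debugfs root for the
 * device, typically /sys/kernel/debug/dri/<minor>/. A register can then
 * be read with something like the following from userspace (the offset
 * here is hypothetical):
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=<reg>
 */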
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif