/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0)
		adev->last_mm_index = v;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C)
		udelay(500);
}

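/*
 * Illustrative note (an addition, not part of the upstream file): driver
 * code normally reaches the helpers above through convenience macros that
 * are assumed to live in amdgpu.h, roughly along the lines of:
 *
 *	#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
 *	#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
 *
 * so a typical read-modify-write looks like (mmSOME_REG and
 * SOME_ENABLE_BIT are hypothetical names):
 *
 *	u32 tmp = RREG32(mmSOME_REG);
 *	tmp |= SOME_ENABLE_BIT;
 *	WREG32(mmSOME_REG, tmp);
 */
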
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0)
		adev->last_mm_index = v;

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C)
		udelay(500);
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

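/*
 * Illustrative note (an addition, not part of the upstream file): ring code
 * is assumed to reach the doorbell helpers above through WDOORBELL32()/
 * WDOORBELL64()-style wrappers rather than calling them directly, e.g. when
 * a ring commits its write pointer:
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */
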
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r)
			return r;
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL)
		return;

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

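/*
 * Illustrative sketch (an addition; the register names and values are made
 * up, not a real golden list): the array consumed by
 * amdgpu_program_register_sequence() is a flat sequence of
 * { offset, AND mask, OR mask } triples:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmSOME_REG,  0x0000ff00, 0x00001200,
 *		mmOTHER_REG, 0xffffffff, 0x00000001,
 *	};
 *
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */
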
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a 64-bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64-bit (two slot) wb entry for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
							      adev->wb.num_wb, 0, 2, 7, 0);

	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a 64-bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a 64-bit (two slot) wb entry allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

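/*
 * Illustrative sketch (an addition, not part of the upstream file): ring
 * setup code is assumed to grab a writeback slot roughly like this, then
 * point the GPU at the matching offset so it can post status (e.g. a read
 * pointer) there:
 *
 *	u32 rptr_offs;
 *	int r = amdgpu_wb_get(adev, &rptr_offs);
 *	if (r)
 *		return r;
 *	// CPU view:  adev->wb.wb[rptr_offs]
 *	// GPU view:  adev->wb.gpu_addr + rptr_offs * 4
 *	...
 *	amdgpu_wb_free(adev, rptr_offs);
 */
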
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

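/*
 * Illustrative worked example (an addition; the numbers are invented): with
 * a 40-bit mc_mask and 8 GB of VRAM placed at base 0, vram_start = 0x0 and
 * vram_end = 0x1_FFFF_FFFF, so size_bf = 0 and size_af covers everything
 * above vram_end; a 256 MB GTT therefore lands after VRAM at gtt_start =
 * 0x2_0000_0000 (subject to gtt_base_align rounding).
 */
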
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;

}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case,
		 * after VM reboot some old SMC firmware still needs the driver
		 * to do a vPost, otherwise the GPU hangs. SMC firmware versions
		 * above 22.15 don't have this flaw, so force vPost only for SMC
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the mc register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater than or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			 ((ip_block->version->major == major) &&
			  (ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

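/*
 * Illustrative usage sketch (an addition, not part of the upstream file):
 * ASIC code can gate a workaround on the version of an IP block it depends
 * on, e.g.
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0) {
 *		// SMC block is v7.1 or newer
 *	}
 */
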
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

Alex Deucherd38ceaf2015-04-20 16:55:21 -04001509static int amdgpu_early_init(struct amdgpu_device *adev)
1510{
Alex Deucheraaa36a92015-04-20 17:31:14 -04001511 int i, r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001512
Alex Deucher483ef982016-09-30 12:43:04 -04001513 amdgpu_device_enable_virtual_display(adev);
Emily Denga6be7572016-08-08 11:37:50 +08001514
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001515 switch (adev->asic_type) {
Alex Deucheraaa36a92015-04-20 17:31:14 -04001516 case CHIP_TOPAZ:
1517 case CHIP_TONGA:
David Zhang48299f92015-07-08 01:05:16 +08001518 case CHIP_FIJI:
Flora Cui2cc0c0b2016-03-14 18:33:29 -04001519 case CHIP_POLARIS11:
1520 case CHIP_POLARIS10:
Junwei Zhangc4642a42016-12-14 15:32:28 -05001521 case CHIP_POLARIS12:
Alex Deucheraaa36a92015-04-20 17:31:14 -04001522 case CHIP_CARRIZO:
Samuel Li39bb0c92015-10-08 16:31:43 -04001523 case CHIP_STONEY:
1524 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001525 adev->family = AMDGPU_FAMILY_CZ;
1526 else
1527 adev->family = AMDGPU_FAMILY_VI;
1528
1529 r = vi_set_ip_blocks(adev);
1530 if (r)
1531 return r;
1532 break;
Ken Wang33f34802016-01-21 17:29:41 +08001533#ifdef CONFIG_DRM_AMDGPU_SI
1534 case CHIP_VERDE:
1535 case CHIP_TAHITI:
1536 case CHIP_PITCAIRN:
1537 case CHIP_OLAND:
1538 case CHIP_HAINAN:
Ken Wang295d0da2016-05-24 21:02:53 +08001539 adev->family = AMDGPU_FAMILY_SI;
Ken Wang33f34802016-01-21 17:29:41 +08001540 r = si_set_ip_blocks(adev);
1541 if (r)
1542 return r;
1543 break;
1544#endif
Alex Deuchera2e73f52015-04-20 17:09:27 -04001545#ifdef CONFIG_DRM_AMDGPU_CIK
1546 case CHIP_BONAIRE:
1547 case CHIP_HAWAII:
1548 case CHIP_KAVERI:
1549 case CHIP_KABINI:
1550 case CHIP_MULLINS:
1551 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1552 adev->family = AMDGPU_FAMILY_CI;
1553 else
1554 adev->family = AMDGPU_FAMILY_KV;
1555
1556 r = cik_set_ip_blocks(adev);
1557 if (r)
1558 return r;
1559 break;
1560#endif
Chunming Zhou2ca8a5d2016-12-07 17:31:19 +08001561 case CHIP_VEGA10:
1562 case CHIP_RAVEN:
1563 if (adev->asic_type == CHIP_RAVEN)
1564 adev->family = AMDGPU_FAMILY_RV;
1565 else
1566 adev->family = AMDGPU_FAMILY_AI;
Ken Wang460826e2017-03-06 14:53:16 -05001567
1568 r = soc15_set_ip_blocks(adev);
1569 if (r)
1570 return r;
1571 break;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001572 default:
1573 /* FIXME: not supported yet */
1574 return -EINVAL;
1575 }
1576
Alex Deuchere2a75f82017-04-27 16:58:01 -04001577 r = amdgpu_device_parse_gpu_info_fw(adev);
1578 if (r)
1579 return r;
1580
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001581 if (amdgpu_sriov_vf(adev)) {
1582 r = amdgpu_virt_request_full_gpu(adev, true);
1583 if (r)
1584 return r;
1585 }
1586
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001587 for (i = 0; i < adev->num_ip_blocks; i++) {
1588 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
Huang Ruied8cf002017-05-03 09:40:17 +08001589 DRM_ERROR("disabled ip block: %d <%s>\n",
1590 i, adev->ip_blocks[i].version->funcs->name);
Alex Deuchera1255102016-10-13 17:41:13 -04001591 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001592 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001593 if (adev->ip_blocks[i].version->funcs->early_init) {
1594 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001595 if (r == -ENOENT) {
Alex Deuchera1255102016-10-13 17:41:13 -04001596 adev->ip_blocks[i].status.valid = false;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001597 } else if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001598 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1599 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001600 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001601 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001602 adev->ip_blocks[i].status.valid = true;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001603 }
Alex Deucher974e6b62015-07-10 13:59:44 -04001604 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001605 adev->ip_blocks[i].status.valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001606 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001607 }
1608 }
1609
Nicolai Hähnle395d1fb2016-06-02 12:32:07 +02001610 adev->cg_flags &= amdgpu_cg_mask;
1611 adev->pg_flags &= amdgpu_pg_mask;
1612
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001613 return 0;
1614}
1615
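/*
 * amdgpu_init - run sw_init/hw_init for all valid IP blocks
 *
 * The first pass runs sw_init on every block and brings the GMC
 * hardware up immediately (plus VRAM scratch, writeback and, under
 * SRIOV, the CSA) so GPU memory is usable; the second pass runs
 * hw_init on the remaining blocks.
 */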
1616static int amdgpu_init(struct amdgpu_device *adev)
1617{
1618 int i, r;
1619
1620 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001621 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001622 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001623 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001624 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001625 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1626 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001627 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001628 }
Alex Deuchera1255102016-10-13 17:41:13 -04001629 adev->ip_blocks[i].status.sw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001630 /* need to do gmc hw init early so we can allocate gpu mem */
Alex Deuchera1255102016-10-13 17:41:13 -04001631 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001632 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001633 if (r) {
1634 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001635 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001636 }
Alex Deuchera1255102016-10-13 17:41:13 -04001637 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001638 if (r) {
1639 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001640 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001641 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001642 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001643 if (r) {
1644 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001645 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001646 }
Alex Deuchera1255102016-10-13 17:41:13 -04001647 adev->ip_blocks[i].status.hw = true;
Monk Liu24936642017-01-09 15:54:32 +08001648
 1649			/* right after GMC hw init, we create the CSA (context save area) */
1650 if (amdgpu_sriov_vf(adev)) {
1651 r = amdgpu_allocate_static_csa(adev);
1652 if (r) {
1653 DRM_ERROR("allocate CSA failed %d\n", r);
1654 return r;
1655 }
1656 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001657 }
1658 }
1659
1660 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001661 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001662 continue;
1663 /* gmc hw init is done early */
Alex Deuchera1255102016-10-13 17:41:13 -04001664 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001665 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001666 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001667 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001668 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1669 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001670 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001671 }
Alex Deuchera1255102016-10-13 17:41:13 -04001672 adev->ip_blocks[i].status.hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001673 }
1674
1675 return 0;
1676}
1677
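/*
 * The reset magic is a snapshot of the first AMDGPU_RESET_MAGIC_NUM
 * bytes of the GART table, recorded at late init; after a GPU reset,
 * amdgpu_check_vram_lost() compares it against the current contents
 * to detect whether VRAM contents were lost.
 */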
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001678static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1679{
1680 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1681}
1682
1683static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1684{
1685 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1686 AMDGPU_RESET_MAGIC_NUM);
1687}
1688
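/*
 * Enable clockgating on all valid blocks except UVD/VCE (handled
 * specially); runs from the late_init_work delayed work so gating is
 * applied a while after init/resume has completed.
 */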
Shirish S2dc80b02017-05-25 10:05:25 +05301689static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1690{
1691 int i = 0, r;
1692
1693 for (i = 0; i < adev->num_ip_blocks; i++) {
1694 if (!adev->ip_blocks[i].status.valid)
1695 continue;
1696 /* skip CG for VCE/UVD, it's handled specially */
1697 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1698 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1699 /* enable clockgating to save power */
1700 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1701 AMD_CG_STATE_GATE);
1702 if (r) {
1703 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1704 adev->ip_blocks[i].version->funcs->name, r);
1705 return r;
1706 }
1707 }
1708 }
1709 return 0;
1710}
1711
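/*
 * amdgpu_late_init - run late_init for all valid IP blocks, schedule
 * the deferred clockgating work and record the reset magic used for
 * VRAM-loss detection.
 */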
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001712static int amdgpu_late_init(struct amdgpu_device *adev)
1713{
1714 int i = 0, r;
1715
1716 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001717 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001718 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001719 if (adev->ip_blocks[i].version->funcs->late_init) {
1720 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001721 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001722 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1723 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001724 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001725 }
Alex Deuchera1255102016-10-13 17:41:13 -04001726 adev->ip_blocks[i].status.late_initialized = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001727 }
1728 }
1729
Shirish S2dc80b02017-05-25 10:05:25 +05301730 mod_delayed_work(system_wq, &adev->late_init_work,
1731 msecs_to_jiffies(AMDGPU_RESUME_MS));
1732
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001733 amdgpu_fill_reset_magic(adev);
1734
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001735 return 0;
1736}
1737
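/*
 * amdgpu_fini - tear down all IP blocks
 *
 * The SMC is shut down first (after ungating), then hw_fini, sw_fini
 * and late_fini run over the blocks in reverse init order; under
 * SRIOV the CSA and the full-GPU grant are released at the end.
 */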
1738static int amdgpu_fini(struct amdgpu_device *adev)
1739{
1740 int i, r;
1741
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001742 /* need to disable SMC first */
1743 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001744 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001745 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001746 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001747 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001748 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1749 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001750 if (r) {
1751 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001752 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001753 return r;
1754 }
Alex Deuchera1255102016-10-13 17:41:13 -04001755 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001756 /* XXX handle errors */
1757 if (r) {
1758 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001759 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001760 }
Alex Deuchera1255102016-10-13 17:41:13 -04001761 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001762 break;
1763 }
1764 }
1765
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001766 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001767 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001768 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001769 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001770 amdgpu_wb_fini(adev);
1771 amdgpu_vram_scratch_fini(adev);
1772 }
Rex Zhu8201a672016-11-24 21:44:44 +08001773
1774 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1775 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1776 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1777 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1778 AMD_CG_STATE_UNGATE);
1779 if (r) {
1780 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1781 adev->ip_blocks[i].version->funcs->name, r);
1782 return r;
1783 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001784 }
Rex Zhu8201a672016-11-24 21:44:44 +08001785
Alex Deuchera1255102016-10-13 17:41:13 -04001786 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001787 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001788 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001789 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1790 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001791 }
Rex Zhu8201a672016-11-24 21:44:44 +08001792
Alex Deuchera1255102016-10-13 17:41:13 -04001793 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001794 }
1795
1796 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001797 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001798 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001799 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001800 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001801 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001802 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1803 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001804 }
Alex Deuchera1255102016-10-13 17:41:13 -04001805 adev->ip_blocks[i].status.sw = false;
1806 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001807 }
1808
Monk Liua6dcfd92016-05-19 14:36:34 +08001809 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001810 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001811 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001812 if (adev->ip_blocks[i].version->funcs->late_fini)
1813 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1814 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001815 }
1816
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001817 if (amdgpu_sriov_vf(adev)) {
Monk Liu24936642017-01-09 15:54:32 +08001818 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001819 amdgpu_virt_release_full_gpu(adev, false);
1820 }
Monk Liu24936642017-01-09 15:54:32 +08001821
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001822 return 0;
1823}
1824
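/* deferred work scheduled by amdgpu_late_init() to apply clockgating */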
Shirish S2dc80b02017-05-25 10:05:25 +05301825static void amdgpu_late_init_func_handler(struct work_struct *work)
1826{
1827 struct amdgpu_device *adev =
1828 container_of(work, struct amdgpu_device, late_init_work.work);
1829 amdgpu_late_set_cg_state(adev);
1830}
1831
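/*
 * amdgpu_suspend - suspend all valid IP blocks
 *
 * Ungates the SMC first, then each remaining block, and calls every
 * block's suspend callback in reverse init order; under SRIOV the
 * sequence is bracketed by a full-GPU request/release.
 */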
Alex Deucherfaefba92016-12-06 10:38:29 -05001832int amdgpu_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001833{
1834 int i, r;
1835
Xiangliang Yue941ea92017-01-18 12:47:55 +08001836 if (amdgpu_sriov_vf(adev))
1837 amdgpu_virt_request_full_gpu(adev, false);
1838
Flora Cuic5a93a22016-02-26 10:45:25 +08001839 /* ungate SMC block first */
1840 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1841 AMD_CG_STATE_UNGATE);
1842 if (r) {
1843 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1844 }
1845
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001846 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001847 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001848 continue;
1849 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001850		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
Alex Deuchera1255102016-10-13 17:41:13 -04001851 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1852 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001853 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001854 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1855 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001856 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001857 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001858 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001859 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001860 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001861 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001862 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1863 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001864 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001865 }
1866
Xiangliang Yue941ea92017-01-18 12:47:55 +08001867 if (amdgpu_sriov_vf(adev))
1868 amdgpu_virt_release_full_gpu(adev, false);
1869
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001870 return 0;
1871}
1872
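/*
 * After a virtual function reset the hardware is re-initialized in
 * two stages: the early stage below brings up GMC/COMMON/IH so the
 * GART can be recovered, then amdgpu_sriov_reinit_late() restarts
 * SMC/DCE/GFX/SDMA/VCE in that order.
 */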
Monk Liue4f0fdc2017-02-09 11:55:49 +08001873static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001874{
1875 int i, r;
1876
Monk Liu2cb681b2017-04-26 12:00:49 +08001877 static enum amd_ip_block_type ip_order[] = {
1878 AMD_IP_BLOCK_TYPE_GMC,
1879 AMD_IP_BLOCK_TYPE_COMMON,
Monk Liu2cb681b2017-04-26 12:00:49 +08001880 AMD_IP_BLOCK_TYPE_IH,
1881 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001882
Monk Liu2cb681b2017-04-26 12:00:49 +08001883 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1884 int j;
1885 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001886
Monk Liu2cb681b2017-04-26 12:00:49 +08001887 for (j = 0; j < adev->num_ip_blocks; j++) {
1888 block = &adev->ip_blocks[j];
1889
1890 if (block->version->type != ip_order[i] ||
1891 !block->status.valid)
1892 continue;
1893
1894 r = block->version->funcs->hw_init(adev);
 1895			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001896 }
1897 }
1898
1899 return 0;
1900}
1901
Monk Liue4f0fdc2017-02-09 11:55:49 +08001902static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001903{
1904 int i, r;
1905
Monk Liu2cb681b2017-04-26 12:00:49 +08001906 static enum amd_ip_block_type ip_order[] = {
1907 AMD_IP_BLOCK_TYPE_SMC,
1908 AMD_IP_BLOCK_TYPE_DCE,
1909 AMD_IP_BLOCK_TYPE_GFX,
1910 AMD_IP_BLOCK_TYPE_SDMA,
1911 AMD_IP_BLOCK_TYPE_VCE,
1912 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001913
Monk Liu2cb681b2017-04-26 12:00:49 +08001914 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1915 int j;
1916 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001917
Monk Liu2cb681b2017-04-26 12:00:49 +08001918 for (j = 0; j < adev->num_ip_blocks; j++) {
1919 block = &adev->ip_blocks[j];
1920
1921 if (block->version->type != ip_order[i] ||
1922 !block->status.valid)
1923 continue;
1924
1925 r = block->version->funcs->hw_init(adev);
 1926			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001927 }
1928 }
1929
1930 return 0;
1931}
1932
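/*
 * Resume is split in two phases: phase1 brings COMMON/GMC/IH back so
 * memory access and interrupts work again, phase2 resumes everything
 * else.
 */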
Chunming Zhoufcf06492017-05-05 10:33:33 +08001933static int amdgpu_resume_phase1(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001934{
1935 int i, r;
1936
1937 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001938 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001939 continue;
Chunming Zhoufcf06492017-05-05 10:33:33 +08001940 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1941 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1942 adev->ip_blocks[i].version->type ==
1943 AMD_IP_BLOCK_TYPE_IH) {
1944 r = adev->ip_blocks[i].version->funcs->resume(adev);
1945 if (r) {
1946 DRM_ERROR("resume of IP block <%s> failed %d\n",
1947 adev->ip_blocks[i].version->funcs->name, r);
1948 return r;
1949 }
1950 }
1951 }
1952
1953 return 0;
1954}
1955
1956static int amdgpu_resume_phase2(struct amdgpu_device *adev)
1957{
1958 int i, r;
1959
1960 for (i = 0; i < adev->num_ip_blocks; i++) {
1961 if (!adev->ip_blocks[i].status.valid)
1962 continue;
1963 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1964 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
 1965		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
1966 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001967 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001968 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001969 DRM_ERROR("resume of IP block <%s> failed %d\n",
1970 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001971 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001972 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001973 }
1974
1975 return 0;
1976}
1977
Chunming Zhoufcf06492017-05-05 10:33:33 +08001978static int amdgpu_resume(struct amdgpu_device *adev)
1979{
1980 int r;
1981
1982 r = amdgpu_resume_phase1(adev);
1983 if (r)
1984 return r;
1985 r = amdgpu_resume_phase2(adev);
1986
1987 return r;
1988}
1989
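/* flag an SRIOV vbios when either the atomfirmware or the atombios
 * tables advertise GPU virtualization support */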
Monk Liu4e99a442016-03-31 13:26:59 +08001990static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04001991{
Alex Deuchera5bde2f2016-09-23 16:23:41 -04001992 if (adev->is_atom_fw) {
1993 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1994 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1995 } else {
1996 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1997 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1998 }
Andres Rodriguez048765a2016-06-11 02:51:32 -04001999}
2000
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002001/**
2002 * amdgpu_device_init - initialize the driver
2003 *
2004 * @adev: amdgpu_device pointer
 2005 * @ddev: drm dev pointer
2006 * @pdev: pci dev pointer
2007 * @flags: driver flags
2008 *
2009 * Initializes the driver info and hw (all asics).
2010 * Returns 0 for success or an error on failure.
2011 * Called at driver startup.
2012 */
2013int amdgpu_device_init(struct amdgpu_device *adev,
2014 struct drm_device *ddev,
2015 struct pci_dev *pdev,
2016 uint32_t flags)
2017{
2018 int r, i;
2019 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02002020 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002021
2022 adev->shutdown = false;
2023 adev->dev = &pdev->dev;
2024 adev->ddev = ddev;
2025 adev->pdev = pdev;
2026 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08002027 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002028 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2029 adev->mc.gtt_size = 512 * 1024 * 1024;
2030 adev->accel_working = false;
2031 adev->num_rings = 0;
2032 adev->mman.buffer_funcs = NULL;
2033 adev->mman.buffer_funcs_ring = NULL;
2034 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01002035 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002036 adev->gart.gart_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002037 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002038
2039 adev->smc_rreg = &amdgpu_invalid_rreg;
2040 adev->smc_wreg = &amdgpu_invalid_wreg;
2041 adev->pcie_rreg = &amdgpu_invalid_rreg;
2042 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08002043 adev->pciep_rreg = &amdgpu_invalid_rreg;
2044 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002045 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2046 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2047 adev->didt_rreg = &amdgpu_invalid_rreg;
2048 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08002049 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2050 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002051 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2052 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2053
Rex Zhuccdbb202016-06-08 12:47:41 +08002054
Alex Deucher3e39ab92015-06-05 15:04:33 -04002055 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2056 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2057 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002058
 2059	/* mutex initialization is all done here so we
 2060	 * can call these functions again later without locking issues */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002061 atomic_set(&adev->irq.ih.lock, 0);
Huang Rui0e5ca0d2017-03-03 18:37:23 -05002062 mutex_init(&adev->firmware.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002063 mutex_init(&adev->pm.mutex);
2064 mutex_init(&adev->gfx.gpu_clock_mutex);
2065 mutex_init(&adev->srbm_mutex);
2066 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002067 mutex_init(&adev->mn_lock);
2068 hash_init(adev->mn_hash);
2069
2070 amdgpu_check_arguments(adev);
2071
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002072 spin_lock_init(&adev->mmio_idx_lock);
2073 spin_lock_init(&adev->smc_idx_lock);
2074 spin_lock_init(&adev->pcie_idx_lock);
2075 spin_lock_init(&adev->uvd_ctx_idx_lock);
2076 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08002077 spin_lock_init(&adev->gc_cac_idx_lock);
Evan Quan16abb5d2017-07-04 09:21:50 +08002078 spin_lock_init(&adev->se_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002079 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02002080 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002081
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08002082 INIT_LIST_HEAD(&adev->shadow_list);
2083 mutex_init(&adev->shadow_list_lock);
2084
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002085 INIT_LIST_HEAD(&adev->gtt_list);
2086 spin_lock_init(&adev->gtt_list_lock);
2087
Andres Rodriguez795f2812017-03-06 16:27:55 -05002088 INIT_LIST_HEAD(&adev->ring_lru_list);
2089 spin_lock_init(&adev->ring_lru_list_lock);
2090
Shirish S2dc80b02017-05-25 10:05:25 +05302091 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2092
Alex Xie0fa49552017-06-08 14:58:05 -04002093 /* Registers mapping */
2094 /* TODO: block userspace mapping of io register */
Ken Wangda69c1612016-01-21 19:08:55 +08002095 if (adev->asic_type >= CHIP_BONAIRE) {
2096 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2097 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2098 } else {
2099 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2100 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2101 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002102
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002103 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2104 if (adev->rmmio == NULL) {
2105 return -ENOMEM;
2106 }
2107 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2108 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2109
Ken Wangda69c1612016-01-21 19:08:55 +08002110 if (adev->asic_type >= CHIP_BONAIRE)
2111 /* doorbell bar mapping */
2112 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002113
2114 /* io port mapping */
2115 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2116 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2117 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2118 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2119 break;
2120 }
2121 }
2122 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05002123		DRM_INFO("PCI I/O BAR not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002124
2125 /* early init functions */
2126 r = amdgpu_early_init(adev);
2127 if (r)
2128 return r;
2129
2130 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2131 /* this will fail for cards that aren't VGA class devices, just
2132 * ignore it */
2133 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2134
2135 if (amdgpu_runtime_pm == 1)
2136 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04002137 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002138 runtime = true;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002139 if (!pci_is_thunderbolt_attached(adev->pdev))
2140 vga_switcheroo_register_client(adev->pdev,
2141 &amdgpu_switcheroo_ops, runtime);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002142 if (runtime)
2143 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2144
2145 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04002146 if (!amdgpu_get_bios(adev)) {
2147 r = -EINVAL;
2148 goto failed;
2149 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01002150
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002151 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002152 if (r) {
2153 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002154 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002155 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002156 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002157
Monk Liu4e99a442016-03-31 13:26:59 +08002158 /* detect if we are with an SRIOV vbios */
2159 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04002160
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002161 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08002162 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002163 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08002164 dev_err(adev->dev, "no vBIOS found\n");
Gavin Wan89041942017-06-23 13:55:15 -04002165 amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002166 r = -EINVAL;
2167 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002168 }
Monk Liubec86372016-09-14 19:38:08 +08002169 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08002170 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2171 if (r) {
2172 dev_err(adev->dev, "gpu post error!\n");
Gavin Wan89041942017-06-23 13:55:15 -04002173 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
Monk Liu4e99a442016-03-31 13:26:59 +08002174 goto failed;
2175 }
2176 } else {
2177 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002178 }
2179
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002180 if (!adev->is_atom_fw) {
2181 /* Initialize clocks */
2182 r = amdgpu_atombios_get_clock_info(adev);
2183 if (r) {
2184 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002185 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2186 goto failed;
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002187 }
2188 /* init i2c buses */
2189 amdgpu_atombios_i2c_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002190 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002191
2192 /* Fence driver */
2193 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002194 if (r) {
2195 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002196 amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002197 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002198 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002199
2200 /* init the mode config */
2201 drm_mode_config_init(adev->ddev);
2202
2203 r = amdgpu_init(adev);
2204 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05002205 dev_err(adev->dev, "amdgpu_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002206 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002207 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002208 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002209 }
2210
2211 adev->accel_working = true;
2212
Alex Xiee59c0202017-06-01 09:42:59 -04002213 amdgpu_vm_check_compute_bug(adev);
2214
Marek Olšák95844d22016-08-17 23:49:27 +02002215 /* Initialize the buffer migration limit. */
2216 if (amdgpu_moverate >= 0)
2217 max_MBps = amdgpu_moverate;
2218 else
2219 max_MBps = 8; /* Allow 8 MB/s. */
2220 /* Get a log2 for easy divisions. */
2221 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2222
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002223 r = amdgpu_ib_pool_init(adev);
2224 if (r) {
2225 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Gavin Wan89041942017-06-23 13:55:15 -04002226 amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002227 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002228 }
2229
2230 r = amdgpu_ib_ring_tests(adev);
2231 if (r)
2232 DRM_ERROR("ib ring test failed (%d).\n", r);
2233
Monk Liu9bc92b92017-02-08 17:38:13 +08002234 amdgpu_fbdev_init(adev);
2235
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002236 r = amdgpu_gem_debugfs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002237 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002238 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002239
2240 r = amdgpu_debugfs_regs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002241 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002242 DRM_ERROR("registering register debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002243
Huang Rui4f0955f2017-05-10 23:04:06 +08002244 r = amdgpu_debugfs_test_ib_ring_init(adev);
2245 if (r)
2246 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2247
Huang Rui50ab2532016-06-12 15:51:09 +08002248 r = amdgpu_debugfs_firmware_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002249 if (r)
Huang Rui50ab2532016-06-12 15:51:09 +08002250 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
Huang Rui50ab2532016-06-12 15:51:09 +08002251
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002252 if ((amdgpu_testing & 1)) {
2253 if (adev->accel_working)
2254 amdgpu_test_moves(adev);
2255 else
2256 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2257 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002258 if (amdgpu_benchmarking) {
2259 if (adev->accel_working)
2260 amdgpu_benchmark(adev, amdgpu_benchmarking);
2261 else
2262 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2263 }
2264
 2265	/* enable clockgating, etc. after ib tests, since some blocks require
2266 * explicit gating rather than handling it automatically.
2267 */
2268 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002269 if (r) {
2270 dev_err(adev->dev, "amdgpu_late_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002271 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002272 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002273 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002274
2275 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04002276
2277failed:
Gavin Wan89041942017-06-23 13:55:15 -04002278 amdgpu_vf_error_trans_all(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002279 if (runtime)
2280 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2281 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002282}
2283
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002284/**
2285 * amdgpu_device_fini - tear down the driver
2286 *
2287 * @adev: amdgpu_device pointer
2288 *
2289 * Tear down the driver info (all asics).
2290 * Called at driver shutdown.
2291 */
2292void amdgpu_device_fini(struct amdgpu_device *adev)
2293{
2294 int r;
2295
2296 DRM_INFO("amdgpu: finishing device.\n");
2297 adev->shutdown = true;
Pixel Dingdb2c2a92017-04-25 16:47:42 +08002298 if (adev->mode_info.mode_config_initialized)
2299 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002300 /* evict vram memory */
2301 amdgpu_bo_evict_vram(adev);
2302 amdgpu_ib_pool_fini(adev);
2303 amdgpu_fence_driver_fini(adev);
2304 amdgpu_fbdev_fini(adev);
2305 r = amdgpu_fini(adev);
Huang Ruiab4fe3e2017-06-05 22:11:59 +08002306 if (adev->firmware.gpu_info_fw) {
2307 release_firmware(adev->firmware.gpu_info_fw);
2308 adev->firmware.gpu_info_fw = NULL;
2309 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002310 adev->accel_working = false;
Shirish S2dc80b02017-05-25 10:05:25 +05302311 cancel_delayed_work_sync(&adev->late_init_work);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002312 /* free i2c buses */
2313 amdgpu_i2c_fini(adev);
2314 amdgpu_atombios_fini(adev);
2315 kfree(adev->bios);
2316 adev->bios = NULL;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002317 if (!pci_is_thunderbolt_attached(adev->pdev))
2318 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002319 if (adev->flags & AMD_IS_PX)
2320 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002321 vga_client_register(adev->pdev, NULL, NULL, NULL);
2322 if (adev->rio_mem)
2323 pci_iounmap(adev->pdev, adev->rio_mem);
2324 adev->rio_mem = NULL;
2325 iounmap(adev->rmmio);
2326 adev->rmmio = NULL;
Ken Wangda69c1612016-01-21 19:08:55 +08002327 if (adev->asic_type >= CHIP_BONAIRE)
2328 amdgpu_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002329 amdgpu_debugfs_regs_cleanup(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002330}
2331
2332
2333/*
2334 * Suspend & resume.
2335 */
2336/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002337 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002338 *
 2339 * @dev: drm dev pointer
 2340 * @suspend: true to power the pci device down to D3hot
 * @fbcon: suspend the fbdev console as well
2341 *
2342 * Puts the hw in the suspend state (all asics).
2343 * Returns 0 for success or an error on failure.
2344 * Called at driver suspend.
2345 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002346int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002347{
2348 struct amdgpu_device *adev;
2349 struct drm_crtc *crtc;
2350 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002351 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002352
2353 if (dev == NULL || dev->dev_private == NULL) {
2354 return -ENODEV;
2355 }
2356
2357 adev = dev->dev_private;
2358
2359 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2360 return 0;
2361
2362 drm_kms_helper_poll_disable(dev);
2363
2364 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002365 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002366 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2367 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2368 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002369 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002370
Alex Deucher756e6882015-10-08 00:03:36 -04002371 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002372 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04002373 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002374 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2375 struct amdgpu_bo *robj;
2376
Alex Deucher756e6882015-10-08 00:03:36 -04002377 if (amdgpu_crtc->cursor_bo) {
2378 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002379 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002380 if (r == 0) {
2381 amdgpu_bo_unpin(aobj);
2382 amdgpu_bo_unreserve(aobj);
2383 }
2384 }
2385
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002386 if (rfb == NULL || rfb->obj == NULL) {
2387 continue;
2388 }
2389 robj = gem_to_amdgpu_bo(rfb->obj);
2390 /* don't unpin kernel fb objects */
2391 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
Alex Xie7a6901d2017-04-24 13:52:41 -04002392 r = amdgpu_bo_reserve(robj, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002393 if (r == 0) {
2394 amdgpu_bo_unpin(robj);
2395 amdgpu_bo_unreserve(robj);
2396 }
2397 }
2398 }
2399 /* evict vram memory */
2400 amdgpu_bo_evict_vram(adev);
2401
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002402 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002403
2404 r = amdgpu_suspend(adev);
2405
Alex Deuchera0a71e42016-10-10 12:41:36 -04002406 /* evict remaining vram memory
2407 * This second call to evict vram is to evict the gart page table
2408 * using the CPU.
2409 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002410 amdgpu_bo_evict_vram(adev);
2411
Alex Deucherd05da0e2017-06-30 17:08:45 -04002412 amdgpu_atombios_scratch_regs_save(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002413 pci_save_state(dev->pdev);
2414 if (suspend) {
2415 /* Shut down the device */
2416 pci_disable_device(dev->pdev);
2417 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002418 } else {
2419 r = amdgpu_asic_reset(adev);
2420 if (r)
2421 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002422 }
2423
2424 if (fbcon) {
2425 console_lock();
2426 amdgpu_fbdev_set_suspend(adev, 1);
2427 console_unlock();
2428 }
2429 return 0;
2430}
2431
2432/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002433 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002434 *
 2435 * @dev: drm dev pointer
 * @resume: true if the pci device needs to be re-enabled
 * @fbcon: resume the fbdev console as well
2436 *
2437 * Bring the hw back to operating state (all asics).
2438 * Returns 0 for success or an error on failure.
2439 * Called at driver resume.
2440 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002441int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002442{
2443 struct drm_connector *connector;
2444 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002445 struct drm_crtc *crtc;
Huang Rui03161a62017-04-13 16:12:26 +08002446 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002447
2448 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2449 return 0;
2450
jimqu74b0b152016-09-07 17:09:12 +08002451 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002452 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002453
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002454 if (resume) {
2455 pci_set_power_state(dev->pdev, PCI_D0);
2456 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002457 r = pci_enable_device(dev->pdev);
Huang Rui03161a62017-04-13 16:12:26 +08002458 if (r)
2459 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002460 }
Alex Deucherd05da0e2017-06-30 17:08:45 -04002461 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002462
2463 /* post card */
Jim Quc836fec2017-02-10 15:59:59 +08002464 if (amdgpu_need_post(adev)) {
jimqu74b0b152016-09-07 17:09:12 +08002465 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2466 if (r)
2467 DRM_ERROR("amdgpu asic init failed\n");
2468 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002469
2470 r = amdgpu_resume(adev);
Rex Zhue6707212017-03-30 13:21:01 +08002471 if (r) {
Flora Cuica198522016-02-04 15:10:08 +08002472 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Huang Rui03161a62017-04-13 16:12:26 +08002473 goto unlock;
Rex Zhue6707212017-03-30 13:21:01 +08002474 }
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002475 amdgpu_fence_driver_resume(adev);
2476
Flora Cuica198522016-02-04 15:10:08 +08002477 if (resume) {
2478 r = amdgpu_ib_ring_tests(adev);
2479 if (r)
2480 DRM_ERROR("ib ring test failed (%d).\n", r);
2481 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002482
2483 r = amdgpu_late_init(adev);
Huang Rui03161a62017-04-13 16:12:26 +08002484 if (r)
2485 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002486
Alex Deucher756e6882015-10-08 00:03:36 -04002487 /* pin cursors */
2488 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2489 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2490
2491 if (amdgpu_crtc->cursor_bo) {
2492 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002493 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002494 if (r == 0) {
2495 r = amdgpu_bo_pin(aobj,
2496 AMDGPU_GEM_DOMAIN_VRAM,
2497 &amdgpu_crtc->cursor_addr);
2498 if (r != 0)
2499 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2500 amdgpu_bo_unreserve(aobj);
2501 }
2502 }
2503 }
2504
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002505 /* blat the mode back in */
2506 if (fbcon) {
2507 drm_helper_resume_force_mode(dev);
2508 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002509 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002510 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2511 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2512 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002513 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002514 }
2515
2516 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002517
2518 /*
2519 * Most of the connector probing functions try to acquire runtime pm
2520 * refs to ensure that the GPU is powered on when connector polling is
2521 * performed. Since we're calling this from a runtime PM callback,
2522 * trying to acquire rpm refs will cause us to deadlock.
2523 *
2524 * Since we're guaranteed to be holding the rpm lock, it's safe to
2525 * temporarily disable the rpm helpers so this doesn't deadlock us.
2526 */
2527#ifdef CONFIG_PM
2528 dev->dev->power.disable_depth++;
2529#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002530 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002531#ifdef CONFIG_PM
2532 dev->dev->power.disable_depth--;
2533#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002534
Huang Rui03161a62017-04-13 16:12:26 +08002535 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002536 amdgpu_fbdev_set_suspend(adev, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002537
Huang Rui03161a62017-04-13 16:12:26 +08002538unlock:
2539 if (fbcon)
2540 console_unlock();
2541
2542 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002543}
2544
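/*
 * Soft-reset helpers: amdgpu_check_soft_reset() latches each block's
 * hang status, then amdgpu_pre_soft_reset(), amdgpu_soft_reset() and
 * amdgpu_post_soft_reset() run the matching callbacks on the blocks
 * that were flagged as hung.
 */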
Chunming Zhou63fbf422016-07-15 11:19:20 +08002545static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2546{
2547 int i;
2548 bool asic_hang = false;
2549
2550 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002551 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002552 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002553 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2554 adev->ip_blocks[i].status.hang =
2555 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2556 if (adev->ip_blocks[i].status.hang) {
 2557			DRM_INFO("IP block <%s> is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002558 asic_hang = true;
2559 }
2560 }
2561 return asic_hang;
2562}
2563
Baoyou Xie4d446652016-09-18 22:09:35 +08002564static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002565{
2566 int i, r = 0;
2567
2568 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002569 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002570 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002571 if (adev->ip_blocks[i].status.hang &&
2572 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2573 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002574 if (r)
2575 return r;
2576 }
2577 }
2578
2579 return 0;
2580}
2581
Chunming Zhou35d782f2016-07-15 15:57:13 +08002582static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2583{
Alex Deucherda146d32016-10-13 16:07:03 -04002584 int i;
2585
2586 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002587 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002588 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002589 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2590 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2591 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2592 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2593 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002594				DRM_INFO("Some blocks need a full reset!\n");
2595 return true;
2596 }
2597 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002598 }
2599 return false;
2600}
2601
2602static int amdgpu_soft_reset(struct amdgpu_device *adev)
2603{
2604 int i, r = 0;
2605
2606 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002607 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002608 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002609 if (adev->ip_blocks[i].status.hang &&
2610 adev->ip_blocks[i].version->funcs->soft_reset) {
2611 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002612 if (r)
2613 return r;
2614 }
2615 }
2616
2617 return 0;
2618}
2619
2620static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2621{
2622 int i, r = 0;
2623
2624 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002625 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002626 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002627 if (adev->ip_blocks[i].status.hang &&
2628 adev->ip_blocks[i].version->funcs->post_soft_reset)
2629 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002630 if (r)
2631 return r;
2632 }
2633
2634 return 0;
2635}
2636
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002637bool amdgpu_need_backup(struct amdgpu_device *adev)
2638{
2639 if (adev->flags & AMD_IS_APU)
2640 return false;
2641
 2642	return amdgpu_lockup_timeout > 0;
2643}
2644
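/*
 * Restore a buffer object's VRAM contents from its GTT shadow copy
 * after a reset; BOs that were evicted out of VRAM are skipped since
 * their current copy is already valid.
 */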
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002645static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2646 struct amdgpu_ring *ring,
2647 struct amdgpu_bo *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +01002648 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002649{
2650 uint32_t domain;
2651 int r;
2652
Roger.He23d2e502017-04-21 14:24:26 +08002653 if (!bo->shadow)
2654 return 0;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002655
Alex Xie1d284792017-04-24 13:53:04 -04002656 r = amdgpu_bo_reserve(bo, true);
Roger.He23d2e502017-04-21 14:24:26 +08002657 if (r)
2658 return r;
2659 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2660 /* if bo has been evicted, then no need to recover */
2661 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
Roger.He82521312017-04-21 13:08:43 +08002662 r = amdgpu_bo_validate(bo->shadow);
2663 if (r) {
2664 DRM_ERROR("bo validate failed!\n");
2665 goto err;
2666 }
2667
2668 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2669 if (r) {
2670 DRM_ERROR("%p bind failed\n", bo->shadow);
2671 goto err;
2672 }
2673
Roger.He23d2e502017-04-21 14:24:26 +08002674 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002675 NULL, fence, true);
Roger.He23d2e502017-04-21 14:24:26 +08002676 if (r) {
 2677			DRM_ERROR("recovering the page table failed!\n");
2678 goto err;
2679 }
2680 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002681err:
Roger.He23d2e502017-04-21 14:24:26 +08002682 amdgpu_bo_unreserve(bo);
2683 return r;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002684}
2685
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002686/**
Monk Liua90ad3c2017-01-23 14:22:08 +08002687 * amdgpu_sriov_gpu_reset - reset the asic
2688 *
2689 * @adev: amdgpu device pointer
Monk Liu7225f872017-04-26 14:51:54 +08002690 * @job: the job that triggered the hang, or NULL
Monk Liua90ad3c2017-01-23 14:22:08 +08002691 *
 2692 * Attempt to reset the GPU if it has hung (all asics),
 2693 * for the SRIOV case.
2694 * Returns 0 for success or an error on failure.
2695 */
Monk Liu7225f872017-04-26 14:51:54 +08002696int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002697{
Monk Liu65781c72017-05-11 13:36:44 +08002698 int i, j, r = 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002699 int resched;
2700 struct amdgpu_bo *bo, *tmp;
2701 struct amdgpu_ring *ring;
2702 struct dma_fence *fence = NULL, *next = NULL;
2703
Monk Liu147b5982017-01-25 15:48:01 +08002704 mutex_lock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002705 atomic_inc(&adev->gpu_reset_counter);
Monk Liu1fb37a32017-01-26 15:36:37 +08002706 adev->gfx.in_reset = true;
Monk Liua90ad3c2017-01-23 14:22:08 +08002707
2708 /* block TTM */
2709 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2710
Monk Liu65781c72017-05-11 13:36:44 +08002711	/* start from the ring that triggered the GPU hang */
2712 j = job ? job->ring->idx : 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002713
Monk Liu65781c72017-05-11 13:36:44 +08002714 /* block scheduler */
2715 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2716 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002717 if (!ring || !ring->sched.thread)
2718 continue;
2719
2720 kthread_park(ring->sched.thread);
Monk Liua90ad3c2017-01-23 14:22:08 +08002721
Monk Liu65781c72017-05-11 13:36:44 +08002722 if (job && j != i)
2723 continue;
2724
Monk Liu4f059ec2017-05-11 13:59:15 +08002725		/* give the job one last chance to be removed from the mirror list,
Monk Liu65781c72017-05-11 13:36:44 +08002726		 * since we have already spent some time in kthread_park */
Monk Liu4f059ec2017-05-11 13:59:15 +08002727 if (job && list_empty(&job->base.node)) {
Monk Liu65781c72017-05-11 13:36:44 +08002728 kthread_unpark(ring->sched.thread);
2729 goto give_up_reset;
2730 }
2731
 2732		if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2733 amd_sched_job_kickout(&job->base);
2734
 2735		/* only do job_reset on the hung ring if @job is not NULL */
2736 amd_sched_hw_job_reset(&ring->sched);
2737
2738 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2739 amdgpu_fence_driver_force_completion_ring(ring);
2740 }
Monk Liua90ad3c2017-01-23 14:22:08 +08002741
2742 /* request to take full control of GPU before re-initialization */
Monk Liu7225f872017-04-26 14:51:54 +08002743 if (job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002744 amdgpu_virt_reset_gpu(adev);
2745 else
2746 amdgpu_virt_request_full_gpu(adev, true);
2747
2748
2749 /* Resume IP prior to SMC */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002750 amdgpu_sriov_reinit_early(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002751
 2752	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
2753 amdgpu_ttm_recover_gart(adev);
2754
2755 /* now we are okay to resume SMC/CP/SDMA */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002756 amdgpu_sriov_reinit_late(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002757
2758 amdgpu_irq_gpu_reset_resume_helper(adev);
2759
 2760	if (amdgpu_ib_ring_tests(adev))
 2761		dev_err(adev->dev, "[GPU_RESET] ib ring test failed.\n");
2762
2763 /* release full control of GPU after ib test */
2764 amdgpu_virt_release_full_gpu(adev, true);
2765
 2766	DRM_INFO("recovering vram BOs from shadow\n");
2767
2768 ring = adev->mman.buffer_funcs_ring;
2769 mutex_lock(&adev->shadow_list_lock);
2770 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002771 next = NULL;
Monk Liua90ad3c2017-01-23 14:22:08 +08002772 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2773 if (fence) {
2774 r = dma_fence_wait(fence, false);
2775 if (r) {
 2776				WARN(r, "recovery from shadow didn't complete\n");
2777 break;
2778 }
2779 }
2780
2781 dma_fence_put(fence);
2782 fence = next;
2783 }
2784 mutex_unlock(&adev->shadow_list_lock);
2785
2786 if (fence) {
2787 r = dma_fence_wait(fence, false);
2788 if (r)
 2789			WARN(r, "recovery from shadow didn't complete\n");
2790 }
2791 dma_fence_put(fence);
2792
Monk Liu65781c72017-05-11 13:36:44 +08002793 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2794 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002795 if (!ring || !ring->sched.thread)
2796 continue;
2797
Monk Liu65781c72017-05-11 13:36:44 +08002798 if (job && j != i) {
2799 kthread_unpark(ring->sched.thread);
2800 continue;
2801 }
2802
Monk Liua90ad3c2017-01-23 14:22:08 +08002803 amd_sched_job_recovery(&ring->sched);
2804 kthread_unpark(ring->sched.thread);
2805 }
2806
2807 drm_helper_resume_force_mode(adev->ddev);
Monk Liu65781c72017-05-11 13:36:44 +08002808give_up_reset:
Monk Liua90ad3c2017-01-23 14:22:08 +08002809 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2810 if (r) {
 2811		/* bad news, how do we tell userspace? */
2812 dev_info(adev->dev, "GPU reset failed\n");
Monk Liu65781c72017-05-11 13:36:44 +08002813 } else {
 2814		dev_info(adev->dev, "GPU reset succeeded!\n");
Monk Liua90ad3c2017-01-23 14:22:08 +08002815 }
2816
Monk Liu1fb37a32017-01-26 15:36:37 +08002817 adev->gfx.in_reset = false;
Monk Liu147b5982017-01-25 15:48:01 +08002818 mutex_unlock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002819 return r;
2820}
2821
2822/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002823 * amdgpu_gpu_reset - reset the asic
2824 *
2825 * @adev: amdgpu device pointer
2826 *
2827 * Attempt to reset the GPU if it has hung (all asics).
2828 * Returns 0 for success or an error on failure.
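 *
 * Rough flow, per the code below (a summary, not an API contract): try a
 * soft reset first; if it fails or blocks are still hung, fall back to a
 * full reset (suspend the IP blocks, ASIC reset, re-post the card, resume
 * in two phases, recover the GART, and, when VRAM was lost, restore buffer
 * objects from their shadow copies) before restarting the schedulers and
 * running the IB ring tests.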
2829 */
2830int amdgpu_gpu_reset(struct amdgpu_device *adev)
2831{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002832 int i, r;
2833 int resched;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002834 bool need_full_reset, vram_lost = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002835
Chunming Zhou63fbf422016-07-15 11:19:20 +08002836 if (!amdgpu_check_soft_reset(adev)) {
2837 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2838 return 0;
2839 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002840
Marek Olšákd94aed52015-05-05 21:13:49 +02002841 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002842
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002843 /* block TTM */
2844 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2845
Chunming Zhou0875dc92016-06-12 15:41:58 +08002846 /* block scheduler */
2847 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2848 struct amdgpu_ring *ring = adev->rings[i];
2849
Chunming Zhou51687752017-04-24 17:09:15 +08002850 if (!ring || !ring->sched.thread)
Chunming Zhou0875dc92016-06-12 15:41:58 +08002851 continue;
2852 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002853 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002854 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002855 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2856 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002857
Chunming Zhou35d782f2016-07-15 15:57:13 +08002858 need_full_reset = amdgpu_need_full_reset(adev);
2859
2860 if (!need_full_reset) {
2861 amdgpu_pre_soft_reset(adev);
2862 r = amdgpu_soft_reset(adev);
2863 amdgpu_post_soft_reset(adev);
2864 if (r || amdgpu_check_soft_reset(adev)) {
2865 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2866 need_full_reset = true;
2867 }
2868 }
2869
2870 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002871 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002872
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002873retry:
Alex Deucherd05da0e2017-06-30 17:08:45 -04002874 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002875 r = amdgpu_asic_reset(adev);
Alex Deucherd05da0e2017-06-30 17:08:45 -04002876 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002877 /* post card */
2878 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002879
Chunming Zhou35d782f2016-07-15 15:57:13 +08002880 if (!r) {
2881 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Chunming Zhoufcf06492017-05-05 10:33:33 +08002882 r = amdgpu_resume_phase1(adev);
2883 if (r)
2884 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002885 vram_lost = amdgpu_check_vram_lost(adev);
Chunming Zhouf1892132017-05-15 16:48:27 +08002886 if (vram_lost) {
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002887 DRM_ERROR("VRAM is lost!\n");
Chunming Zhouf1892132017-05-15 16:48:27 +08002888 atomic_inc(&adev->vram_lost_counter);
2889 }
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002890 r = amdgpu_ttm_recover_gart(adev);
2891 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002892 goto out;
2893 r = amdgpu_resume_phase2(adev);
2894 if (r)
2895 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002896 if (vram_lost)
2897 amdgpu_fill_reset_magic(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002898 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08002899 }
2900out:
2901 if (!r) {
2902 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002903 r = amdgpu_ib_ring_tests(adev);
2904 if (r) {
2905 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002906 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002907 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002908 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002909 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002910		/*
2911		 * Recover VM page tables, since we cannot rely on VRAM being
2912		 * consistent after a full GPU reset.
2913		 */
2914 if (need_full_reset && amdgpu_need_backup(adev)) {
2915 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2916 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002917 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002918
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002919 DRM_INFO("recover vram bo from shadow\n");
2920 mutex_lock(&adev->shadow_list_lock);
2921 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002922 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002923 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2924 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002925 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002926 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08002927 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002928 break;
2929 }
2930 }
2931
Chris Wilsonf54d1862016-10-25 13:00:45 +01002932 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002933 fence = next;
2934 }
2935 mutex_unlock(&adev->shadow_list_lock);
2936 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002937 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002938 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08002939 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002940 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002941 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002942 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002943 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2944 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08002945
2946 if (!ring || !ring->sched.thread)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002947 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002948
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002949 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002950 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002951 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002952 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002953 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Gavin Wan89041942017-06-23 13:55:15 -04002954 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002955 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou51687752017-04-24 17:09:15 +08002956 if (adev->rings[i] && adev->rings[i]->sched.thread) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002957 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002958 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002959 }
2960 }
2961
2962 drm_helper_resume_force_mode(adev->ddev);
2963
2964 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
Gavin Wan89041942017-06-23 13:55:15 -04002965 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002966		/* bad news, how do we tell userspace? */
2967 dev_info(adev->dev, "GPU reset failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002968 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
2969	} else {
Chunming Zhou6643be62017-05-05 10:50:09 +08002971		dev_info(adev->dev, "GPU reset succeeded!\n");
Gavin Wan89041942017-06-23 13:55:15 -04002972 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002973
Gavin Wan89041942017-06-23 13:55:15 -04002974 amdgpu_vf_error_trans_all(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002975 return r;
2976}
2977
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002978void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2979{
2980 u32 mask;
2981 int ret;
2982
Alex Deuchercd474ba2016-02-04 10:21:23 -05002983 if (amdgpu_pcie_gen_cap)
2984 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2985
2986 if (amdgpu_pcie_lane_cap)
2987 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
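	/*
	 * Note: amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap above are module
	 * parameters (assumed boot syntax: amdgpu.pcie_gen_cap=<mask> and
	 * amdgpu.pcie_lane_cap=<mask>, using the CAIL_* mask bits); when
	 * non-zero they bypass the detection logic below entirely.
	 */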
2988
2989 /* covers APUs as well */
2990 if (pci_is_root_bus(adev->pdev->bus)) {
2991 if (adev->pm.pcie_gen_mask == 0)
2992 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2993 if (adev->pm.pcie_mlw_mask == 0)
2994 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002995 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002996 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05002997
2998 if (adev->pm.pcie_gen_mask == 0) {
2999 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3000 if (!ret) {
3001 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3002 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3003 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3004
3005 if (mask & DRM_PCIE_SPEED_25)
3006 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3007 if (mask & DRM_PCIE_SPEED_50)
3008 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3009 if (mask & DRM_PCIE_SPEED_80)
3010 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3011 } else {
3012 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3013 }
3014 }
3015 if (adev->pm.pcie_mlw_mask == 0) {
3016 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3017 if (!ret) {
3018 switch (mask) {
3019 case 32:
3020 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3021 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3022 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3023 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3024 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3025 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3026 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3027 break;
3028 case 16:
3029 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3030 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3031 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3032 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3033 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3034 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3035 break;
3036 case 12:
3037 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3038 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3039 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3040 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3041 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3042 break;
3043 case 8:
3044 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3045 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3047 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3048 break;
3049 case 4:
3050 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3051 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3052 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3053 break;
3054 case 2:
3055 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3056 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3057 break;
3058 case 1:
3059 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3060 break;
3061 default:
3062 break;
3063 }
3064 } else {
3065 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003066 }
3067 }
3068}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003069
3070/*
3071 * Debugfs
3072 */
3073int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04003074 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003075 unsigned nfiles)
3076{
3077 unsigned i;
3078
3079 for (i = 0; i < adev->debugfs_count; i++) {
3080 if (adev->debugfs[i].files == files) {
3081 /* Already registered */
3082 return 0;
3083 }
3084 }
3085
3086 i = adev->debugfs_count + 1;
3087 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3088 DRM_ERROR("Reached maximum number of debugfs components.\n");
3089 DRM_ERROR("Report so we increase "
3090 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3091 return -EINVAL;
3092 }
3093 adev->debugfs[adev->debugfs_count].files = files;
3094 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3095 adev->debugfs_count = i;
3096#if defined(CONFIG_DEBUG_FS)
3097 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003098 adev->ddev->primary->debugfs_root,
3099 adev->ddev->primary);
3100#endif
3101 return 0;
3102}
3103
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003104#if defined(CONFIG_DEBUG_FS)
3105
3106static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3107 size_t size, loff_t *pos)
3108{
Al Viro45063092016-12-04 18:24:56 -05003109 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003110 ssize_t result = 0;
3111 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04003112 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04003113 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003114
3115 if (size & 0x3 || *pos & 0x3)
3116 return -EINVAL;
3117
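	/*
	 * Sketch of the *pos bit layout consumed below (derived from the
	 * decode that follows, not an authoritative ABI description):
	 *   bits  0..21  register offset in bytes
	 *   bit   23     take the PM/PG lock around the access
	 *   bits 24..33  SE bank      (0x3FF = broadcast)
	 *   bits 34..43  SH bank      (0x3FF = broadcast)
	 *   bits 44..53  instance     (0x3FF = broadcast)
	 *   bit   62     enable SE/SH/instance bank selection
	 * e.g. reading register byte offset 0x2000 on SE 1, all SH, all
	 * instances: *pos = (1ULL << 62) | (0x3FFULL << 44) |
	 * (0x3FFULL << 34) | (1ULL << 24) | 0x2000.
	 */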
Tom St Denisbd122672016-07-28 09:39:22 -04003118 /* are we reading registers for which a PG lock is necessary? */
3119 pm_pg_lock = (*pos >> 23) & 1;
3120
Tom St Denis566281592016-06-27 11:55:07 -04003121 if (*pos & (1ULL << 62)) {
3122 se_bank = (*pos >> 24) & 0x3FF;
3123 sh_bank = (*pos >> 34) & 0x3FF;
3124 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04003125
3126 if (se_bank == 0x3FF)
3127 se_bank = 0xFFFFFFFF;
3128 if (sh_bank == 0x3FF)
3129 sh_bank = 0xFFFFFFFF;
3130 if (instance_bank == 0x3FF)
3131 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04003132 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04003133 } else {
3134 use_bank = 0;
3135 }
3136
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003137 *pos &= (1UL << 22) - 1;
Tom St Denisbd122672016-07-28 09:39:22 -04003138
Tom St Denis566281592016-06-27 11:55:07 -04003139 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04003140 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3141 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04003142 return -EINVAL;
3143 mutex_lock(&adev->grbm_idx_mutex);
3144 amdgpu_gfx_select_se_sh(adev, se_bank,
3145 sh_bank, instance_bank);
3146 }
3147
Tom St Denisbd122672016-07-28 09:39:22 -04003148 if (pm_pg_lock)
3149 mutex_lock(&adev->pm.mutex);
3150
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003151 while (size) {
3152 uint32_t value;
3153
3154 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04003155 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003156
3157 value = RREG32(*pos >> 2);
3158 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04003159 if (r) {
3160 result = r;
3161 goto end;
3162 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003163
3164 result += 4;
3165 buf += 4;
3166 *pos += 4;
3167 size -= 4;
3168 }
3169
Tom St Denis566281592016-06-27 11:55:07 -04003170end:
3171 if (use_bank) {
3172 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3173 mutex_unlock(&adev->grbm_idx_mutex);
3174 }
3175
Tom St Denisbd122672016-07-28 09:39:22 -04003176 if (pm_pg_lock)
3177 mutex_unlock(&adev->pm.mutex);
3178
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003179 return result;
3180}
3181
3182static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3183 size_t size, loff_t *pos)
3184{
Al Viro45063092016-12-04 18:24:56 -05003185 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003186 ssize_t result = 0;
3187 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04003188 bool pm_pg_lock, use_bank;
3189 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003190
3191 if (size & 0x3 || *pos & 0x3)
3192 return -EINVAL;
3193
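	/* same *pos bit layout as documented in amdgpu_debugfs_regs_read() */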
Tom St Denis394fdde2016-10-10 07:31:23 -04003194	/* are we writing registers for which a PG lock is necessary? */
3195 pm_pg_lock = (*pos >> 23) & 1;
3196
3197 if (*pos & (1ULL << 62)) {
3198 se_bank = (*pos >> 24) & 0x3FF;
3199 sh_bank = (*pos >> 34) & 0x3FF;
3200 instance_bank = (*pos >> 44) & 0x3FF;
3201
3202 if (se_bank == 0x3FF)
3203 se_bank = 0xFFFFFFFF;
3204 if (sh_bank == 0x3FF)
3205 sh_bank = 0xFFFFFFFF;
3206 if (instance_bank == 0x3FF)
3207 instance_bank = 0xFFFFFFFF;
3208 use_bank = 1;
3209 } else {
3210 use_bank = 0;
3211 }
3212
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003213 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04003214
3215 if (use_bank) {
3216 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3217 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3218 return -EINVAL;
3219 mutex_lock(&adev->grbm_idx_mutex);
3220 amdgpu_gfx_select_se_sh(adev, se_bank,
3221 sh_bank, instance_bank);
3222 }
3223
3224 if (pm_pg_lock)
3225 mutex_lock(&adev->pm.mutex);
3226
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003227 while (size) {
3228 uint32_t value;
3229
3230 if (*pos > adev->rmmio_size)
3231 return result;
3232
3233 r = get_user(value, (uint32_t *)buf);
3234 if (r)
3235 return r;
3236
3237 WREG32(*pos >> 2, value);
3238
3239 result += 4;
3240 buf += 4;
3241 *pos += 4;
3242 size -= 4;
3243 }
3244
Tom St Denis394fdde2016-10-10 07:31:23 -04003245 if (use_bank) {
3246 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3247 mutex_unlock(&adev->grbm_idx_mutex);
3248 }
3249
3250 if (pm_pg_lock)
3251 mutex_unlock(&adev->pm.mutex);
3252
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003253 return result;
3254}
3255
Tom St Denisadcec282016-04-15 13:08:44 -04003256static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3257 size_t size, loff_t *pos)
3258{
Al Viro45063092016-12-04 18:24:56 -05003259 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003260 ssize_t result = 0;
3261 int r;
3262
3263 if (size & 0x3 || *pos & 0x3)
3264 return -EINVAL;
3265
3266 while (size) {
3267 uint32_t value;
3268
3269 value = RREG32_PCIE(*pos >> 2);
3270 r = put_user(value, (uint32_t *)buf);
3271 if (r)
3272 return r;
3273
3274 result += 4;
3275 buf += 4;
3276 *pos += 4;
3277 size -= 4;
3278 }
3279
3280 return result;
3281}
3282
3283static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3284 size_t size, loff_t *pos)
3285{
Al Viro45063092016-12-04 18:24:56 -05003286 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003287 ssize_t result = 0;
3288 int r;
3289
3290 if (size & 0x3 || *pos & 0x3)
3291 return -EINVAL;
3292
3293 while (size) {
3294 uint32_t value;
3295
3296 r = get_user(value, (uint32_t *)buf);
3297 if (r)
3298 return r;
3299
3300 WREG32_PCIE(*pos >> 2, value);
3301
3302 result += 4;
3303 buf += 4;
3304 *pos += 4;
3305 size -= 4;
3306 }
3307
3308 return result;
3309}
3310
3311static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3312 size_t size, loff_t *pos)
3313{
Al Viro45063092016-12-04 18:24:56 -05003314 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003315 ssize_t result = 0;
3316 int r;
3317
3318 if (size & 0x3 || *pos & 0x3)
3319 return -EINVAL;
3320
3321 while (size) {
3322 uint32_t value;
3323
3324 value = RREG32_DIDT(*pos >> 2);
3325 r = put_user(value, (uint32_t *)buf);
3326 if (r)
3327 return r;
3328
3329 result += 4;
3330 buf += 4;
3331 *pos += 4;
3332 size -= 4;
3333 }
3334
3335 return result;
3336}
3337
3338static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3339 size_t size, loff_t *pos)
3340{
Al Viro45063092016-12-04 18:24:56 -05003341 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003342 ssize_t result = 0;
3343 int r;
3344
3345 if (size & 0x3 || *pos & 0x3)
3346 return -EINVAL;
3347
3348 while (size) {
3349 uint32_t value;
3350
3351 r = get_user(value, (uint32_t *)buf);
3352 if (r)
3353 return r;
3354
3355 WREG32_DIDT(*pos >> 2, value);
3356
3357 result += 4;
3358 buf += 4;
3359 *pos += 4;
3360 size -= 4;
3361 }
3362
3363 return result;
3364}
3365
3366static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3367 size_t size, loff_t *pos)
3368{
Al Viro45063092016-12-04 18:24:56 -05003369 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003370 ssize_t result = 0;
3371 int r;
3372
3373 if (size & 0x3 || *pos & 0x3)
3374 return -EINVAL;
3375
3376 while (size) {
3377 uint32_t value;
3378
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003379 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04003380 r = put_user(value, (uint32_t *)buf);
3381 if (r)
3382 return r;
3383
3384 result += 4;
3385 buf += 4;
3386 *pos += 4;
3387 size -= 4;
3388 }
3389
3390 return result;
3391}
3392
3393static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3394 size_t size, loff_t *pos)
3395{
Al Viro45063092016-12-04 18:24:56 -05003396 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003397 ssize_t result = 0;
3398 int r;
3399
3400 if (size & 0x3 || *pos & 0x3)
3401 return -EINVAL;
3402
3403 while (size) {
3404 uint32_t value;
3405
3406 r = get_user(value, (uint32_t *)buf);
3407 if (r)
3408 return r;
3409
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003410 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04003411
3412 result += 4;
3413 buf += 4;
3414 *pos += 4;
3415 size -= 4;
3416 }
3417
3418 return result;
3419}
3420
Tom St Denis1e051412016-06-27 09:57:18 -04003421static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3422 size_t size, loff_t *pos)
3423{
Al Viro45063092016-12-04 18:24:56 -05003424 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04003425 ssize_t result = 0;
3426 int r;
3427 uint32_t *config, no_regs = 0;
3428
3429 if (size & 0x3 || *pos & 0x3)
3430 return -EINVAL;
3431
Markus Elfringecab7662016-09-18 17:00:52 +02003432 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04003433 if (!config)
3434 return -ENOMEM;
3435
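	/*
	 * Userspace reads this file as a flat array of dwords; config[0] is a
	 * layout version so consumers can cope with fields being appended in
	 * later revisions (the rev==N markers below track those additions).
	 */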
3436 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05003437 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04003438 config[no_regs++] = adev->gfx.config.max_shader_engines;
3439 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3440 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3441 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3442 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3443 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3444 config[no_regs++] = adev->gfx.config.max_gprs;
3445 config[no_regs++] = adev->gfx.config.max_gs_threads;
3446 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3447 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3448 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3449 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3450 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3451 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3452 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3453 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3454 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3455 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3456 config[no_regs++] = adev->gfx.config.num_gpus;
3457 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3458 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3459 config[no_regs++] = adev->gfx.config.gb_addr_config;
3460 config[no_regs++] = adev->gfx.config.num_rbs;
3461
Tom St Denis89a8f302016-08-12 15:14:31 -04003462 /* rev==1 */
3463 config[no_regs++] = adev->rev_id;
3464 config[no_regs++] = adev->pg_flags;
3465 config[no_regs++] = adev->cg_flags;
3466
Tom St Denise9f11dc2016-08-17 12:00:51 -04003467 /* rev==2 */
3468 config[no_regs++] = adev->family;
3469 config[no_regs++] = adev->external_rev_id;
3470
Tom St Denis9a999352017-01-18 13:01:25 -05003471 /* rev==3 */
3472 config[no_regs++] = adev->pdev->device;
3473 config[no_regs++] = adev->pdev->revision;
3474 config[no_regs++] = adev->pdev->subsystem_device;
3475 config[no_regs++] = adev->pdev->subsystem_vendor;
3476
Tom St Denis1e051412016-06-27 09:57:18 -04003477 while (size && (*pos < no_regs * 4)) {
3478 uint32_t value;
3479
3480 value = config[*pos >> 2];
3481 r = put_user(value, (uint32_t *)buf);
3482 if (r) {
3483 kfree(config);
3484 return r;
3485 }
3486
3487 result += 4;
3488 buf += 4;
3489 *pos += 4;
3490 size -= 4;
3491 }
3492
3493 kfree(config);
3494 return result;
3495}
3496
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003497static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3498 size_t size, loff_t *pos)
3499{
Al Viro45063092016-12-04 18:24:56 -05003500 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003501 int idx, x, outsize, r, valuesize;
3502 uint32_t values[16];
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003503
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003504 if (size & 3 || *pos & 0x3)
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003505 return -EINVAL;
3506
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003507 if (amdgpu_dpm == 0)
3508 return -EINVAL;
3509
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003510 /* convert offset to sensor number */
3511 idx = *pos >> 2;
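	/*
	 * i.e. userspace selects a sensor by seeking to 4 * <sensor id> and
	 * reading a multiple of 4 bytes (a sketch; the ids come from the
	 * powerplay sensor enum, which is not spelled out in this file).
	 */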
3512
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003513 valuesize = sizeof(values);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003514 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003515 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003516 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3517 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3518 &valuesize);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003519 else
3520 return -EINVAL;
3521
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003522 if (size > valuesize)
3523 return -EINVAL;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003524
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003525 outsize = 0;
3526 x = 0;
3527 if (!r) {
3528 while (size) {
3529 r = put_user(values[x++], (int32_t *)buf);
3530 buf += 4;
3531 size -= 4;
3532 outsize += 4;
3533 }
3534 }
3535
3536 return !r ? outsize : r;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003537}
Tom St Denis1e051412016-06-27 09:57:18 -04003538
Tom St Denis273d7aa2016-10-11 14:48:55 -04003539static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3540 size_t size, loff_t *pos)
3541{
3542 struct amdgpu_device *adev = f->f_inode->i_private;
3543 int r, x;
3544	ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003545 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003546
3547 if (size & 3 || *pos & 3)
3548 return -EINVAL;
3549
3550 /* decode offset */
3551 offset = (*pos & 0x7F);
3552 se = ((*pos >> 7) & 0xFF);
3553 sh = ((*pos >> 15) & 0xFF);
3554 cu = ((*pos >> 23) & 0xFF);
3555 wave = ((*pos >> 31) & 0xFF);
3556 simd = ((*pos >> 37) & 0xFF);
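	/*
	 * Sketch of the *pos encoding, per the decode above: byte offset in
	 * bits 0..6, then SE/SH/CU/wave/SIMD fields at the shifts shown.
	 * e.g. wave 0 on SE 1, SH 0, CU 0, SIMD 0: *pos = 1ULL << 7.
	 */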
Tom St Denis273d7aa2016-10-11 14:48:55 -04003557
3558 /* switch to the specific se/sh/cu */
3559 mutex_lock(&adev->grbm_idx_mutex);
3560 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3561
3562 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003563 if (adev->gfx.funcs->read_wave_data)
3564 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003565
3566 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3567 mutex_unlock(&adev->grbm_idx_mutex);
3568
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003569 if (!x)
3570 return -EINVAL;
3571
Tom St Denis472259f2016-10-14 09:49:09 -04003572 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003573 uint32_t value;
3574
Tom St Denis472259f2016-10-14 09:49:09 -04003575 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003576 r = put_user(value, (uint32_t *)buf);
3577 if (r)
3578 return r;
3579
3580 result += 4;
3581 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003582 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003583 size -= 4;
3584 }
3585
3586 return result;
3587}
3588
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003589static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3590 size_t size, loff_t *pos)
3591{
3592 struct amdgpu_device *adev = f->f_inode->i_private;
3593 int r;
3594 ssize_t result = 0;
3595 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3596
3597 if (size & 3 || *pos & 3)
3598 return -EINVAL;
3599
3600 /* decode offset */
3601 offset = (*pos & 0xFFF); /* in dwords */
3602 se = ((*pos >> 12) & 0xFF);
3603 sh = ((*pos >> 20) & 0xFF);
3604 cu = ((*pos >> 28) & 0xFF);
3605 wave = ((*pos >> 36) & 0xFF);
3606 simd = ((*pos >> 44) & 0xFF);
3607 thread = ((*pos >> 52) & 0xFF);
3608 bank = ((*pos >> 60) & 1);
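	/*
	 * Sketch, per the decode above: dword offset in bits 0..11, then
	 * SE/SH/CU/wave/SIMD/thread fields in 8-bit groups, with bit 60
	 * selecting SGPRs (1) vs VGPRs (0). e.g. reading the first four
	 * SGPRs of SE 0/SH 0/CU 0/wave 0/SIMD 0: *pos = 1ULL << 60,
	 * size = 16.
	 */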
3609
3610 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3611 if (!data)
3612 return -ENOMEM;
3613
3614 /* switch to the specific se/sh/cu */
3615 mutex_lock(&adev->grbm_idx_mutex);
3616 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3617
3618 if (bank == 0) {
3619 if (adev->gfx.funcs->read_wave_vgprs)
3620 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3621 } else {
3622 if (adev->gfx.funcs->read_wave_sgprs)
3623 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3624 }
3625
3626 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3627 mutex_unlock(&adev->grbm_idx_mutex);
3628
3629 while (size) {
3630 uint32_t value;
3631
3632 value = data[offset++];
3633 r = put_user(value, (uint32_t *)buf);
3634 if (r) {
3635 result = r;
3636 goto err;
3637 }
3638
3639 result += 4;
3640 buf += 4;
3641 size -= 4;
3642 }
3643
3644err:
3645 kfree(data);
3646 return result;
3647}
3648
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003649static const struct file_operations amdgpu_debugfs_regs_fops = {
3650 .owner = THIS_MODULE,
3651 .read = amdgpu_debugfs_regs_read,
3652 .write = amdgpu_debugfs_regs_write,
3653 .llseek = default_llseek
3654};
Tom St Denisadcec282016-04-15 13:08:44 -04003655static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3656 .owner = THIS_MODULE,
3657 .read = amdgpu_debugfs_regs_didt_read,
3658 .write = amdgpu_debugfs_regs_didt_write,
3659 .llseek = default_llseek
3660};
3661static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3662 .owner = THIS_MODULE,
3663 .read = amdgpu_debugfs_regs_pcie_read,
3664 .write = amdgpu_debugfs_regs_pcie_write,
3665 .llseek = default_llseek
3666};
3667static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3668 .owner = THIS_MODULE,
3669 .read = amdgpu_debugfs_regs_smc_read,
3670 .write = amdgpu_debugfs_regs_smc_write,
3671 .llseek = default_llseek
3672};
3673
Tom St Denis1e051412016-06-27 09:57:18 -04003674static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3675 .owner = THIS_MODULE,
3676 .read = amdgpu_debugfs_gca_config_read,
3677 .llseek = default_llseek
3678};
3679
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003680static const struct file_operations amdgpu_debugfs_sensors_fops = {
3681 .owner = THIS_MODULE,
3682 .read = amdgpu_debugfs_sensor_read,
3683 .llseek = default_llseek
3684};
3685
Tom St Denis273d7aa2016-10-11 14:48:55 -04003686static const struct file_operations amdgpu_debugfs_wave_fops = {
3687 .owner = THIS_MODULE,
3688 .read = amdgpu_debugfs_wave_read,
3689 .llseek = default_llseek
3690};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003691static const struct file_operations amdgpu_debugfs_gpr_fops = {
3692 .owner = THIS_MODULE,
3693 .read = amdgpu_debugfs_gpr_read,
3694 .llseek = default_llseek
3695};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003696
Tom St Denisadcec282016-04-15 13:08:44 -04003697static const struct file_operations *debugfs_regs[] = {
3698 &amdgpu_debugfs_regs_fops,
3699 &amdgpu_debugfs_regs_didt_fops,
3700 &amdgpu_debugfs_regs_pcie_fops,
3701 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003702 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003703 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003704 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003705 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003706};
3707
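/*
 * File names for the fops above; amdgpu_debugfs_regs_init() creates them
 * under the DRM debugfs root (typically /sys/kernel/debug/dri/<minor>/ --
 * the exact mount point is an assumption, not fixed by this file).
 */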
3708static const char *debugfs_regs_names[] = {
3709 "amdgpu_regs",
3710 "amdgpu_regs_didt",
3711 "amdgpu_regs_pcie",
3712 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003713 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003714 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003715 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003716 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003717};
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003718
3719static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3720{
3721 struct drm_minor *minor = adev->ddev->primary;
3722 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003723 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003724
Tom St Denisadcec282016-04-15 13:08:44 -04003725 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3726 ent = debugfs_create_file(debugfs_regs_names[i],
3727 S_IFREG | S_IRUGO, root,
3728 adev, debugfs_regs[i]);
3729 if (IS_ERR(ent)) {
3730 for (j = 0; j < i; j++) {
3731				debugfs_remove(adev->debugfs_regs[j]);
3732				adev->debugfs_regs[j] = NULL;
3733 }
3734 return PTR_ERR(ent);
3735 }
3736
3737 if (!i)
3738 i_size_write(ent->d_inode, adev->rmmio_size);
3739 adev->debugfs_regs[i] = ent;
3740 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003741
3742 return 0;
3743}
3744
3745static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3746{
Tom St Denisadcec282016-04-15 13:08:44 -04003747 unsigned i;
3748
3749 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3750 if (adev->debugfs_regs[i]) {
3751 debugfs_remove(adev->debugfs_regs[i]);
3752 adev->debugfs_regs[i] = NULL;
3753 }
3754 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003755}
3756
Huang Rui4f0955f2017-05-10 23:04:06 +08003757static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3758{
3759 struct drm_info_node *node = (struct drm_info_node *) m->private;
3760 struct drm_device *dev = node->minor->dev;
3761 struct amdgpu_device *adev = dev->dev_private;
3762 int r = 0, i;
3763
3764 /* hold on the scheduler */
3765 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3766 struct amdgpu_ring *ring = adev->rings[i];
3767
3768 if (!ring || !ring->sched.thread)
3769 continue;
3770 kthread_park(ring->sched.thread);
3771 }
3772
3773	seq_printf(m, "running ib ring tests:\n");
3774 r = amdgpu_ib_ring_tests(adev);
3775 if (r)
3776 seq_printf(m, "ib ring tests failed (%d).\n", r);
3777 else
3778 seq_printf(m, "ib ring tests passed.\n");
3779
3780 /* go on the scheduler */
3781 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3782 struct amdgpu_ring *ring = adev->rings[i];
3783
3784 if (!ring || !ring->sched.thread)
3785 continue;
3786 kthread_unpark(ring->sched.thread);
3787 }
3788
3789 return 0;
3790}
3791
3792static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3793 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3794};
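/*
 * Reading the resulting debugfs file (amdgpu_test_ib, assumed path
 * /sys/kernel/debug/dri/<minor>/amdgpu_test_ib) parks every scheduler
 * thread, runs the IB ring tests and prints pass/fail, then unparks them.
 */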
3795
3796static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3797{
3798 return amdgpu_debugfs_add_files(adev,
3799 amdgpu_debugfs_test_ib_ring_list, 1);
3800}
3801
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003802int amdgpu_debugfs_init(struct drm_minor *minor)
3803{
3804 return 0;
3805}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003806#else
Arnd Bergmann27bad5b2017-06-21 23:51:02 +02003807static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
Huang Rui4f0955f2017-05-10 23:04:06 +08003808{
3809 return 0;
3810}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003811static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3812{
3813 return 0;
3814}
3815static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003816#endif