/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

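/*
 * Illustrative sketch (not part of the driver): a read-modify-write of a
 * register through the helpers above. The offset and masks below are
 * made-up placeholders; most driver code reaches these helpers through the
 * RREG32()/WREG32() macros used elsewhere in this file.
 *
 *	uint32_t val = amdgpu_mm_rreg(adev, some_reg_offset, 0);
 *	val &= ~some_field_mask;
 *	val |= some_field_value;
 *	amdgpu_mm_wreg(adev, some_reg_offset, val, 0);
 */
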
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

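/*
 * Illustrative sketch (not part of the driver): a golden-register table as
 * consumed by amdgpu_program_register_sequence() is a flat array of
 * (register, AND mask, OR mask) triplets. The offsets and values below are
 * made-up placeholders.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmSOME_REG_A, 0xffffffff, 0x00000100,	// and_mask of all ones: overwrite with or_mask
 *		mmSOME_REG_B, 0x0000000f, 0x00000002,	// clear bits 0-3, then set bit 1
 *	};
 *
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */
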
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

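/*
 * Worked example with illustrative numbers: given a 2 MB doorbell BAR and
 * num_doorbells == 1024, amdgpu keeps the first 1024 * sizeof(u32) = 4 KB
 * for its own rings, so amdkfd would be reported aperture_base =
 * adev->doorbell.base, aperture_size = 2 MB and start_offset = 4 KB.
 */
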
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64-bit wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);
	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a 64-bit wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

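/*
 * Illustrative sketch (not part of the driver): typical lifetime of a
 * writeback slot. The caller allocates a slot, lets the GPU write status
 * words into adev->wb.wb[wb], and releases it on teardown.
 *
 *	u32 wb;
 *	int r = amdgpu_wb_get(adev, &wb);
 *	if (r)
 *		return r;
 *	// the GPU can now DMA a status dword into adev->wb.wb[wb];
 *	// the CPU reads it back with le32_to_cpu(adev->wb.wb[wb])
 *	amdgpu_wb_free(adev, wb);
 */
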
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
 * not affected by the bogus hw of Novell bug 204882 along with lots of Ubuntu
 * ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

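/*
 * Worked example with illustrative numbers: with mc_mask covering 40 bits
 * (0xffffffffff), 8 GB of VRAM placed at base 0 and a 1 GB GART requested,
 * vram_start = 0 and vram_end = 0x1ffffffff. The space after VRAM is then
 * larger than the space before it, so the GART is placed after VRAM:
 * gart_start = 0x200000000 and gart_end = 0x23fffffff.
 */
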
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check whether the asic has been initialized (all asics) at driver startup,
 * or whether post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;

}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case, after a
		 * VM reboot some old SMC firmware still needs the driver to do vPost,
		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
		 * this flaw, so force vPost for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gart size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

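/*
 * Illustrative sketch (not part of the driver): the values validated above
 * come from module parameters, so assuming the usual amdgpu parameter names
 * they would be set at load time along the lines of
 *
 *	modprobe amdgpu vm_size=64 vm_block_size=9 sched_jobs=32
 *
 * Out-of-range values are warned about and reset to sane defaults here
 * rather than failing the driver load.
 */
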
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

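/*
 * Illustrative sketch (not part of the driver): a caller that only wants to
 * run on GFX IP v8.0 or newer could use the comparison helper above like
 * this, assuming the usual AMD_IP_BLOCK_TYPE_GFX block type enum:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) == 0) {
 *		// GFX 8.0 or newer is present on this asic
 *	}
 */
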
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

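/*
 * Illustrative sketch (not part of the driver): the virtual display string
 * parsed above is a ';'-separated list of PCI addresses, each optionally
 * followed by ",<num_crtc>", or "all" to enable it on every device, e.g.
 *
 *	amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0
 *
 * The PCI addresses shown are made-up placeholders.
 */
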
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001612 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001613 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001614 adev->ip_blocks[i].status.valid = true;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001615 }
Alex Deucher974e6b62015-07-10 13:59:44 -04001616 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001617 adev->ip_blocks[i].status.valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001618 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001619 }
1620 }
1621
Nicolai Hähnle395d1fb2016-06-02 12:32:07 +02001622 adev->cg_flags &= amdgpu_cg_mask;
1623 adev->pg_flags &= amdgpu_pg_mask;
1624
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001625 return 0;
1626}
1627
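/*
 * amdgpu_init - run sw_init and hw_init for all valid IP blocks
 *
 * GMC hardware init is done first so that VRAM scratch, writeback memory
 * and the SR-IOV CSA buffer can be allocated before the remaining blocks
 * are brought up.
 */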
1628static int amdgpu_init(struct amdgpu_device *adev)
1629{
1630 int i, r;
1631
1632 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001633 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001634 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001635 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001636 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001637 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1638 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001639 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001640 }
Alex Deuchera1255102016-10-13 17:41:13 -04001641 adev->ip_blocks[i].status.sw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001642 /* need to do gmc hw init early so we can allocate gpu mem */
Alex Deuchera1255102016-10-13 17:41:13 -04001643 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001644 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001645 if (r) {
1646 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001647 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001648 }
Alex Deuchera1255102016-10-13 17:41:13 -04001649 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001650 if (r) {
1651 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001652 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001653 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001654 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001655 if (r) {
1656 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001657 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001658 }
Alex Deuchera1255102016-10-13 17:41:13 -04001659 adev->ip_blocks[i].status.hw = true;
Monk Liu24936642017-01-09 15:54:32 +08001660
1661 /* right after GMC hw init, we create CSA */
1662 if (amdgpu_sriov_vf(adev)) {
1663 r = amdgpu_allocate_static_csa(adev);
1664 if (r) {
1665 DRM_ERROR("allocate CSA failed %d\n", r);
1666 return r;
1667 }
1668 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001669 }
1670 }
1671
1672 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001673 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001674 continue;
1675 /* gmc hw init is done early */
Alex Deuchera1255102016-10-13 17:41:13 -04001676 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001677 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001678 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001679 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001680 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1681 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001682 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001683 }
Alex Deuchera1255102016-10-13 17:41:13 -04001684 adev->ip_blocks[i].status.hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001685 }
1686
1687 return 0;
1688}
1689
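/*
 * Save a snapshot of the first AMDGPU_RESET_MAGIC_NUM bytes of the GART
 * table; amdgpu_check_vram_lost() compares the snapshot against the table
 * after a reset to detect whether VRAM contents were lost.
 */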
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001690static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1691{
1692 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1693}
1694
1695static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1696{
1697 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1698 AMDGPU_RESET_MAGIC_NUM);
1699}
1700
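/*
 * Enable clockgating on all valid IP blocks except UVD and VCE, which are
 * handled separately; runs from the delayed late_init work so that gating
 * is only applied a short while after init/resume has completed.
 */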
Shirish S2dc80b02017-05-25 10:05:25 +05301701static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1702{
1703 int i = 0, r;
1704
1705 for (i = 0; i < adev->num_ip_blocks; i++) {
1706 if (!adev->ip_blocks[i].status.valid)
1707 continue;
1708 /* skip CG for VCE/UVD, it's handled specially */
1709 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1710 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1711 /* enable clockgating to save power */
1712 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1713 AMD_CG_STATE_GATE);
1714 if (r) {
1715 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1716 adev->ip_blocks[i].version->funcs->name, r);
1717 return r;
1718 }
1719 }
1720 }
1721 return 0;
1722}
1723
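/*
 * amdgpu_late_init - run late_init for all valid IP blocks
 *
 * Also schedules the delayed clockgating work and records the VRAM
 * "reset magic" used later to detect VRAM loss across a GPU reset.
 */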
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001724static int amdgpu_late_init(struct amdgpu_device *adev)
1725{
1726 int i = 0, r;
1727
1728 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001729 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001730 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001731 if (adev->ip_blocks[i].version->funcs->late_init) {
1732 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001733 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001734 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1735 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001736 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001737 }
Alex Deuchera1255102016-10-13 17:41:13 -04001738 adev->ip_blocks[i].status.late_initialized = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001739 }
1740 }
1741
Shirish S2dc80b02017-05-25 10:05:25 +05301742 mod_delayed_work(system_wq, &adev->late_init_work,
1743 msecs_to_jiffies(AMDGPU_RESUME_MS));
1744
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001745 amdgpu_fill_reset_magic(adev);
1746
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001747 return 0;
1748}
1749
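/*
 * amdgpu_fini - tear down all IP blocks
 *
 * The SMC is shut down first (after ungating its clocks), then hw_fini,
 * sw_fini and late_fini run over the blocks in reverse order; the SR-IOV
 * CSA and full GPU access are released at the end.
 */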
1750static int amdgpu_fini(struct amdgpu_device *adev)
1751{
1752 int i, r;
1753
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001754 /* need to disable SMC first */
1755 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001756 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001757 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001758 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001759			/* ungate blocks before hw fini so that we can shut down the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001760 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1761 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001762 if (r) {
1763 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001764 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001765 return r;
1766 }
Alex Deuchera1255102016-10-13 17:41:13 -04001767 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001768 /* XXX handle errors */
1769 if (r) {
1770 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001771 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001772 }
Alex Deuchera1255102016-10-13 17:41:13 -04001773 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001774 break;
1775 }
1776 }
1777
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001778 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001779 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001780 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001781 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001782 amdgpu_wb_fini(adev);
1783 amdgpu_vram_scratch_fini(adev);
1784 }
Rex Zhu8201a672016-11-24 21:44:44 +08001785
1786 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1787 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1788			/* ungate blocks before hw fini so that we can shut down the blocks safely */
1789 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1790 AMD_CG_STATE_UNGATE);
1791 if (r) {
1792 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1793 adev->ip_blocks[i].version->funcs->name, r);
1794 return r;
1795 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001796 }
Rex Zhu8201a672016-11-24 21:44:44 +08001797
Alex Deuchera1255102016-10-13 17:41:13 -04001798 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001799 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001800 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001801 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1802 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001803 }
Rex Zhu8201a672016-11-24 21:44:44 +08001804
Alex Deuchera1255102016-10-13 17:41:13 -04001805 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001806 }
1807
1808 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001809 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001810 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001811 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001812 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001813 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001814 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1815 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001816 }
Alex Deuchera1255102016-10-13 17:41:13 -04001817 adev->ip_blocks[i].status.sw = false;
1818 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001819 }
1820
Monk Liua6dcfd92016-05-19 14:36:34 +08001821 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001822 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001823 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001824 if (adev->ip_blocks[i].version->funcs->late_fini)
1825 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1826 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001827 }
1828
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001829 if (amdgpu_sriov_vf(adev)) {
Monk Liu24936642017-01-09 15:54:32 +08001830 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001831 amdgpu_virt_release_full_gpu(adev, false);
1832 }
Monk Liu24936642017-01-09 15:54:32 +08001833
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001834 return 0;
1835}
1836
Shirish S2dc80b02017-05-25 10:05:25 +05301837static void amdgpu_late_init_func_handler(struct work_struct *work)
1838{
1839 struct amdgpu_device *adev =
1840 container_of(work, struct amdgpu_device, late_init_work.work);
1841 amdgpu_late_set_cg_state(adev);
1842}
1843
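/*
 * amdgpu_suspend - suspend all IP blocks
 *
 * Ungates the SMC first, then ungates and suspends the remaining blocks in
 * reverse order; under SR-IOV, full GPU access is held for the duration of
 * the teardown.
 */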
Alex Deucherfaefba92016-12-06 10:38:29 -05001844int amdgpu_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001845{
1846 int i, r;
1847
Xiangliang Yue941ea92017-01-18 12:47:55 +08001848 if (amdgpu_sriov_vf(adev))
1849 amdgpu_virt_request_full_gpu(adev, false);
1850
Flora Cuic5a93a22016-02-26 10:45:25 +08001851 /* ungate SMC block first */
1852 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1853 AMD_CG_STATE_UNGATE);
1854 if (r) {
1855		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1856 }
1857
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001858 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001859 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001860 continue;
1861 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001862		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
Alex Deuchera1255102016-10-13 17:41:13 -04001863 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1864 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001865 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001866 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1867 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001868 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001869 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001870 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001871 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001872 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001873 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001874 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1875 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001876 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001877 }
1878
Xiangliang Yue941ea92017-01-18 12:47:55 +08001879 if (amdgpu_sriov_vf(adev))
1880 amdgpu_virt_release_full_gpu(adev, false);
1881
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001882 return 0;
1883}
1884
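/*
 * First phase of IP re-init after an SR-IOV reset: bring back the GMC,
 * COMMON and IH blocks, in that order; the remaining blocks are handled by
 * amdgpu_sriov_reinit_late() once the GART has been recovered.
 */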
Monk Liue4f0fdc2017-02-09 11:55:49 +08001885static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001886{
1887 int i, r;
1888
Monk Liu2cb681b2017-04-26 12:00:49 +08001889 static enum amd_ip_block_type ip_order[] = {
1890 AMD_IP_BLOCK_TYPE_GMC,
1891 AMD_IP_BLOCK_TYPE_COMMON,
Monk Liu2cb681b2017-04-26 12:00:49 +08001892 AMD_IP_BLOCK_TYPE_IH,
1893 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001894
Monk Liu2cb681b2017-04-26 12:00:49 +08001895 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1896 int j;
1897 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001898
Monk Liu2cb681b2017-04-26 12:00:49 +08001899 for (j = 0; j < adev->num_ip_blocks; j++) {
1900 block = &adev->ip_blocks[j];
1901
1902 if (block->version->type != ip_order[i] ||
1903 !block->status.valid)
1904 continue;
1905
1906 r = block->version->funcs->hw_init(adev);
1907			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001908 }
1909 }
1910
1911 return 0;
1912}
1913
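/*
 * Second phase of IP re-init after an SR-IOV reset: bring back the SMC,
 * DCE, GFX, SDMA and VCE blocks, in that order.
 */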
Monk Liue4f0fdc2017-02-09 11:55:49 +08001914static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001915{
1916 int i, r;
1917
Monk Liu2cb681b2017-04-26 12:00:49 +08001918 static enum amd_ip_block_type ip_order[] = {
1919 AMD_IP_BLOCK_TYPE_SMC,
1920 AMD_IP_BLOCK_TYPE_DCE,
1921 AMD_IP_BLOCK_TYPE_GFX,
1922 AMD_IP_BLOCK_TYPE_SDMA,
1923 AMD_IP_BLOCK_TYPE_VCE,
1924 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001925
Monk Liu2cb681b2017-04-26 12:00:49 +08001926 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1927 int j;
1928 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001929
Monk Liu2cb681b2017-04-26 12:00:49 +08001930 for (j = 0; j < adev->num_ip_blocks; j++) {
1931 block = &adev->ip_blocks[j];
1932
1933 if (block->version->type != ip_order[i] ||
1934 !block->status.valid)
1935 continue;
1936
1937 r = block->version->funcs->hw_init(adev);
1938			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001939 }
1940 }
1941
1942 return 0;
1943}
1944
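/*
 * Resume phase 1 restores the COMMON, GMC and IH blocks first, so that the
 * memory controller and interrupts are available before the remaining
 * blocks are resumed in phase 2.
 */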
Chunming Zhoufcf06492017-05-05 10:33:33 +08001945static int amdgpu_resume_phase1(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001946{
1947 int i, r;
1948
1949 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001950 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001951 continue;
Chunming Zhoufcf06492017-05-05 10:33:33 +08001952 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1953 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1954 adev->ip_blocks[i].version->type ==
1955 AMD_IP_BLOCK_TYPE_IH) {
1956 r = adev->ip_blocks[i].version->funcs->resume(adev);
1957 if (r) {
1958 DRM_ERROR("resume of IP block <%s> failed %d\n",
1959 adev->ip_blocks[i].version->funcs->name, r);
1960 return r;
1961 }
1962 }
1963 }
1964
1965 return 0;
1966}
1967
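/* Resume phase 2 restores every remaining block not handled in phase 1. */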
1968static int amdgpu_resume_phase2(struct amdgpu_device *adev)
1969{
1970 int i, r;
1971
1972 for (i = 0; i < adev->num_ip_blocks; i++) {
1973 if (!adev->ip_blocks[i].status.valid)
1974 continue;
1975 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1976 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1977 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1978 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001979 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001980 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001981 DRM_ERROR("resume of IP block <%s> failed %d\n",
1982 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001983 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001984 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001985 }
1986
1987 return 0;
1988}
1989
Chunming Zhoufcf06492017-05-05 10:33:33 +08001990static int amdgpu_resume(struct amdgpu_device *adev)
1991{
1992 int r;
1993
1994 r = amdgpu_resume_phase1(adev);
1995 if (r)
1996 return r;
1997 r = amdgpu_resume_phase2(adev);
1998
1999 return r;
2000}
2001
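/*
 * Query the vbios (atomfirmware or atombios, depending on the ASIC) for
 * virtualization support and record it in the SR-IOV capability flags.
 */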
Monk Liu4e99a442016-03-31 13:26:59 +08002002static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04002003{
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002004 if (adev->is_atom_fw) {
2005 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2006 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2007 } else {
2008 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2009 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2010 }
Andres Rodriguez048765a2016-06-11 02:51:32 -04002011}
2012
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002013/**
2014 * amdgpu_device_init - initialize the driver
2015 *
2016 * @adev: amdgpu_device pointer
2017 * @ddev: drm dev pointer
2018 * @pdev: pci dev pointer
2019 * @flags: driver flags
2020 *
2021 * Initializes the driver info and hw (all asics).
2022 * Returns 0 for success or an error on failure.
2023 * Called at driver startup.
2024 */
2025int amdgpu_device_init(struct amdgpu_device *adev,
2026 struct drm_device *ddev,
2027 struct pci_dev *pdev,
2028 uint32_t flags)
2029{
2030 int r, i;
2031 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02002032 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002033
2034 adev->shutdown = false;
2035 adev->dev = &pdev->dev;
2036 adev->ddev = ddev;
2037 adev->pdev = pdev;
2038 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08002039 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002040 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
Christian König6f02a692017-07-07 11:56:59 +02002041 adev->mc.gart_size = 512 * 1024 * 1024;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002042 adev->accel_working = false;
2043 adev->num_rings = 0;
2044 adev->mman.buffer_funcs = NULL;
2045 adev->mman.buffer_funcs_ring = NULL;
2046 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01002047 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002048 adev->gart.gart_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002049 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002050
2051 adev->smc_rreg = &amdgpu_invalid_rreg;
2052 adev->smc_wreg = &amdgpu_invalid_wreg;
2053 adev->pcie_rreg = &amdgpu_invalid_rreg;
2054 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08002055 adev->pciep_rreg = &amdgpu_invalid_rreg;
2056 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002057 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2058 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2059 adev->didt_rreg = &amdgpu_invalid_rreg;
2060 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08002061 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2062 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002063 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2064 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2065
Rex Zhuccdbb202016-06-08 12:47:41 +08002066
Alex Deucher3e39ab92015-06-05 15:04:33 -04002067 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2068 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2069 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002070
2071	/* mutex initializations are all done here so we
2072	 * can call functions without locking issues */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002073 atomic_set(&adev->irq.ih.lock, 0);
Huang Rui0e5ca0d2017-03-03 18:37:23 -05002074 mutex_init(&adev->firmware.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002075 mutex_init(&adev->pm.mutex);
2076 mutex_init(&adev->gfx.gpu_clock_mutex);
2077 mutex_init(&adev->srbm_mutex);
2078 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002079 mutex_init(&adev->mn_lock);
2080 hash_init(adev->mn_hash);
2081
2082 amdgpu_check_arguments(adev);
2083
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002084 spin_lock_init(&adev->mmio_idx_lock);
2085 spin_lock_init(&adev->smc_idx_lock);
2086 spin_lock_init(&adev->pcie_idx_lock);
2087 spin_lock_init(&adev->uvd_ctx_idx_lock);
2088 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08002089 spin_lock_init(&adev->gc_cac_idx_lock);
Evan Quan16abb5d2017-07-04 09:21:50 +08002090 spin_lock_init(&adev->se_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002091 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02002092 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002093
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08002094 INIT_LIST_HEAD(&adev->shadow_list);
2095 mutex_init(&adev->shadow_list_lock);
2096
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002097 INIT_LIST_HEAD(&adev->gtt_list);
2098 spin_lock_init(&adev->gtt_list_lock);
2099
Andres Rodriguez795f2812017-03-06 16:27:55 -05002100 INIT_LIST_HEAD(&adev->ring_lru_list);
2101 spin_lock_init(&adev->ring_lru_list_lock);
2102
Shirish S2dc80b02017-05-25 10:05:25 +05302103 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2104
Alex Xie0fa49552017-06-08 14:58:05 -04002105 /* Registers mapping */
2106 /* TODO: block userspace mapping of io register */
Ken Wangda69c1612016-01-21 19:08:55 +08002107 if (adev->asic_type >= CHIP_BONAIRE) {
2108 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2109 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2110 } else {
2111 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2112 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2113 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002114
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002115 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2116 if (adev->rmmio == NULL) {
2117 return -ENOMEM;
2118 }
2119 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2120 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2121
Ken Wangda69c1612016-01-21 19:08:55 +08002122 if (adev->asic_type >= CHIP_BONAIRE)
2123 /* doorbell bar mapping */
2124 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002125
2126 /* io port mapping */
2127 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2128 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2129 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2130 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2131 break;
2132 }
2133 }
2134 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05002135 DRM_INFO("PCI I/O BAR is not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002136
2137 /* early init functions */
2138 r = amdgpu_early_init(adev);
2139 if (r)
2140 return r;
2141
2142 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2143 /* this will fail for cards that aren't VGA class devices, just
2144 * ignore it */
2145 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2146
2147 if (amdgpu_runtime_pm == 1)
2148 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04002149 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002150 runtime = true;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002151 if (!pci_is_thunderbolt_attached(adev->pdev))
2152 vga_switcheroo_register_client(adev->pdev,
2153 &amdgpu_switcheroo_ops, runtime);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002154 if (runtime)
2155 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2156
2157 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04002158 if (!amdgpu_get_bios(adev)) {
2159 r = -EINVAL;
2160 goto failed;
2161 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01002162
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002163 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002164 if (r) {
2165 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002166 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002167 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002168 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002169
Monk Liu4e99a442016-03-31 13:26:59 +08002170 /* detect if we are with an SRIOV vbios */
2171 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04002172
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002173 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08002174 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002175 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08002176 dev_err(adev->dev, "no vBIOS found\n");
Gavin Wan89041942017-06-23 13:55:15 -04002177 amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002178 r = -EINVAL;
2179 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002180 }
Monk Liubec86372016-09-14 19:38:08 +08002181 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08002182 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2183 if (r) {
2184 dev_err(adev->dev, "gpu post error!\n");
Gavin Wan89041942017-06-23 13:55:15 -04002185 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
Monk Liu4e99a442016-03-31 13:26:59 +08002186 goto failed;
2187 }
2188 } else {
2189 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002190 }
2191
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002192 if (!adev->is_atom_fw) {
2193 /* Initialize clocks */
2194 r = amdgpu_atombios_get_clock_info(adev);
2195 if (r) {
2196 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002197 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2198 goto failed;
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002199 }
2200 /* init i2c buses */
2201 amdgpu_atombios_i2c_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002202 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002203
2204 /* Fence driver */
2205 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002206 if (r) {
2207 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002208 amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002209 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002210 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002211
2212 /* init the mode config */
2213 drm_mode_config_init(adev->ddev);
2214
2215 r = amdgpu_init(adev);
2216 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05002217 dev_err(adev->dev, "amdgpu_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002218 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002219 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002220 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002221 }
2222
2223 adev->accel_working = true;
2224
Alex Xiee59c0202017-06-01 09:42:59 -04002225 amdgpu_vm_check_compute_bug(adev);
2226
Marek Olšák95844d22016-08-17 23:49:27 +02002227 /* Initialize the buffer migration limit. */
2228 if (amdgpu_moverate >= 0)
2229 max_MBps = amdgpu_moverate;
2230 else
2231 max_MBps = 8; /* Allow 8 MB/s. */
2232 /* Get a log2 for easy divisions. */
2233 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2234
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002235 r = amdgpu_ib_pool_init(adev);
2236 if (r) {
2237 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Gavin Wan89041942017-06-23 13:55:15 -04002238 amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002239 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002240 }
2241
2242 r = amdgpu_ib_ring_tests(adev);
2243 if (r)
2244 DRM_ERROR("ib ring test failed (%d).\n", r);
2245
Monk Liu9bc92b92017-02-08 17:38:13 +08002246 amdgpu_fbdev_init(adev);
2247
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002248 r = amdgpu_gem_debugfs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002249 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002250 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002251
2252 r = amdgpu_debugfs_regs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002253 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002254 DRM_ERROR("registering register debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002255
Huang Rui4f0955f2017-05-10 23:04:06 +08002256 r = amdgpu_debugfs_test_ib_ring_init(adev);
2257 if (r)
2258		DRM_ERROR("registering test ib ring debugfs failed (%d).\n", r);
2259
Huang Rui50ab2532016-06-12 15:51:09 +08002260 r = amdgpu_debugfs_firmware_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002261 if (r)
Huang Rui50ab2532016-06-12 15:51:09 +08002262 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
Huang Rui50ab2532016-06-12 15:51:09 +08002263
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002264 if ((amdgpu_testing & 1)) {
2265 if (adev->accel_working)
2266 amdgpu_test_moves(adev);
2267 else
2268 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2269 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002270 if (amdgpu_benchmarking) {
2271 if (adev->accel_working)
2272 amdgpu_benchmark(adev, amdgpu_benchmarking);
2273 else
2274 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2275 }
2276
2277 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2278 * explicit gating rather than handling it automatically.
2279 */
2280 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002281 if (r) {
2282 dev_err(adev->dev, "amdgpu_late_init failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002283 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002284 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002285 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002286
2287 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04002288
2289failed:
Gavin Wan89041942017-06-23 13:55:15 -04002290 amdgpu_vf_error_trans_all(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002291 if (runtime)
2292 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2293 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002294}
2295
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002296/**
2297 * amdgpu_device_fini - tear down the driver
2298 *
2299 * @adev: amdgpu_device pointer
2300 *
2301 * Tear down the driver info (all asics).
2302 * Called at driver shutdown.
2303 */
2304void amdgpu_device_fini(struct amdgpu_device *adev)
2305{
2306 int r;
2307
2308 DRM_INFO("amdgpu: finishing device.\n");
2309 adev->shutdown = true;
Pixel Dingdb2c2a92017-04-25 16:47:42 +08002310 if (adev->mode_info.mode_config_initialized)
2311 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002312 /* evict vram memory */
2313 amdgpu_bo_evict_vram(adev);
2314 amdgpu_ib_pool_fini(adev);
2315 amdgpu_fence_driver_fini(adev);
2316 amdgpu_fbdev_fini(adev);
2317 r = amdgpu_fini(adev);
Huang Ruiab4fe3e2017-06-05 22:11:59 +08002318 if (adev->firmware.gpu_info_fw) {
2319 release_firmware(adev->firmware.gpu_info_fw);
2320 adev->firmware.gpu_info_fw = NULL;
2321 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002322 adev->accel_working = false;
Shirish S2dc80b02017-05-25 10:05:25 +05302323 cancel_delayed_work_sync(&adev->late_init_work);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002324 /* free i2c buses */
2325 amdgpu_i2c_fini(adev);
2326 amdgpu_atombios_fini(adev);
2327 kfree(adev->bios);
2328 adev->bios = NULL;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002329 if (!pci_is_thunderbolt_attached(adev->pdev))
2330 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002331 if (adev->flags & AMD_IS_PX)
2332 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002333 vga_client_register(adev->pdev, NULL, NULL, NULL);
2334 if (adev->rio_mem)
2335 pci_iounmap(adev->pdev, adev->rio_mem);
2336 adev->rio_mem = NULL;
2337 iounmap(adev->rmmio);
2338 adev->rmmio = NULL;
Ken Wangda69c1612016-01-21 19:08:55 +08002339 if (adev->asic_type >= CHIP_BONAIRE)
2340 amdgpu_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002341 amdgpu_debugfs_regs_cleanup(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002342}
2343
2344
2345/*
2346 * Suspend & resume.
2347 */
2348/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002349 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002350 *
2351 * @dev: drm dev pointer
2352 * @suspend: true to put the PCI device into D3hot, false to reset the asic instead
2353 *
2354 * Puts the hw in the suspend state (all asics).
2355 * Returns 0 for success or an error on failure.
2356 * Called at driver suspend.
2357 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002358int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002359{
2360 struct amdgpu_device *adev;
2361 struct drm_crtc *crtc;
2362 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002363 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002364
2365 if (dev == NULL || dev->dev_private == NULL) {
2366 return -ENODEV;
2367 }
2368
2369 adev = dev->dev_private;
2370
2371 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2372 return 0;
2373
2374 drm_kms_helper_poll_disable(dev);
2375
2376 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002377 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002378 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2379 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2380 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002381 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002382
Alex Deucher756e6882015-10-08 00:03:36 -04002383 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002384 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04002385 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002386 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2387 struct amdgpu_bo *robj;
2388
Alex Deucher756e6882015-10-08 00:03:36 -04002389 if (amdgpu_crtc->cursor_bo) {
2390 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002391 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002392 if (r == 0) {
2393 amdgpu_bo_unpin(aobj);
2394 amdgpu_bo_unreserve(aobj);
2395 }
2396 }
2397
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002398 if (rfb == NULL || rfb->obj == NULL) {
2399 continue;
2400 }
2401 robj = gem_to_amdgpu_bo(rfb->obj);
2402 /* don't unpin kernel fb objects */
2403 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
Alex Xie7a6901d2017-04-24 13:52:41 -04002404 r = amdgpu_bo_reserve(robj, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002405 if (r == 0) {
2406 amdgpu_bo_unpin(robj);
2407 amdgpu_bo_unreserve(robj);
2408 }
2409 }
2410 }
2411 /* evict vram memory */
2412 amdgpu_bo_evict_vram(adev);
2413
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002414 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002415
2416 r = amdgpu_suspend(adev);
2417
Alex Deuchera0a71e42016-10-10 12:41:36 -04002418 /* evict remaining vram memory
2419 * This second call to evict vram is to evict the gart page table
2420 * using the CPU.
2421 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002422 amdgpu_bo_evict_vram(adev);
2423
Alex Deucherd05da0e2017-06-30 17:08:45 -04002424 amdgpu_atombios_scratch_regs_save(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002425 pci_save_state(dev->pdev);
2426 if (suspend) {
2427 /* Shut down the device */
2428 pci_disable_device(dev->pdev);
2429 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002430 } else {
2431 r = amdgpu_asic_reset(adev);
2432 if (r)
2433 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002434 }
2435
2436 if (fbcon) {
2437 console_lock();
2438 amdgpu_fbdev_set_suspend(adev, 1);
2439 console_unlock();
2440 }
2441 return 0;
2442}
2443
2444/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002445 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002446 *
2447 * @dev: drm dev pointer
2448 *
2449 * Bring the hw back to operating state (all asics).
2450 * Returns 0 for success or an error on failure.
2451 * Called at driver resume.
2452 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002453int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002454{
2455 struct drm_connector *connector;
2456 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002457 struct drm_crtc *crtc;
Huang Rui03161a62017-04-13 16:12:26 +08002458 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002459
2460 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2461 return 0;
2462
jimqu74b0b152016-09-07 17:09:12 +08002463 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002464 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002465
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002466 if (resume) {
2467 pci_set_power_state(dev->pdev, PCI_D0);
2468 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002469 r = pci_enable_device(dev->pdev);
Huang Rui03161a62017-04-13 16:12:26 +08002470 if (r)
2471 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002472 }
Alex Deucherd05da0e2017-06-30 17:08:45 -04002473 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002474
2475 /* post card */
Jim Quc836fec2017-02-10 15:59:59 +08002476 if (amdgpu_need_post(adev)) {
jimqu74b0b152016-09-07 17:09:12 +08002477 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2478 if (r)
2479 DRM_ERROR("amdgpu asic init failed\n");
2480 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002481
2482 r = amdgpu_resume(adev);
Rex Zhue6707212017-03-30 13:21:01 +08002483 if (r) {
Flora Cuica198522016-02-04 15:10:08 +08002484 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Huang Rui03161a62017-04-13 16:12:26 +08002485 goto unlock;
Rex Zhue6707212017-03-30 13:21:01 +08002486 }
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002487 amdgpu_fence_driver_resume(adev);
2488
Flora Cuica198522016-02-04 15:10:08 +08002489 if (resume) {
2490 r = amdgpu_ib_ring_tests(adev);
2491 if (r)
2492 DRM_ERROR("ib ring test failed (%d).\n", r);
2493 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002494
2495 r = amdgpu_late_init(adev);
Huang Rui03161a62017-04-13 16:12:26 +08002496 if (r)
2497 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002498
Alex Deucher756e6882015-10-08 00:03:36 -04002499 /* pin cursors */
2500 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2501 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2502
2503 if (amdgpu_crtc->cursor_bo) {
2504 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002505 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002506 if (r == 0) {
2507 r = amdgpu_bo_pin(aobj,
2508 AMDGPU_GEM_DOMAIN_VRAM,
2509 &amdgpu_crtc->cursor_addr);
2510 if (r != 0)
2511 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2512 amdgpu_bo_unreserve(aobj);
2513 }
2514 }
2515 }
2516
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002517 /* blat the mode back in */
2518 if (fbcon) {
2519 drm_helper_resume_force_mode(dev);
2520 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002521 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002522 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2523 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2524 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002525 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002526 }
2527
2528 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002529
2530 /*
2531 * Most of the connector probing functions try to acquire runtime pm
2532 * refs to ensure that the GPU is powered on when connector polling is
2533 * performed. Since we're calling this from a runtime PM callback,
2534 * trying to acquire rpm refs will cause us to deadlock.
2535 *
2536 * Since we're guaranteed to be holding the rpm lock, it's safe to
2537 * temporarily disable the rpm helpers so this doesn't deadlock us.
2538 */
2539#ifdef CONFIG_PM
2540 dev->dev->power.disable_depth++;
2541#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002542 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002543#ifdef CONFIG_PM
2544 dev->dev->power.disable_depth--;
2545#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002546
Huang Rui03161a62017-04-13 16:12:26 +08002547 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002548 amdgpu_fbdev_set_suspend(adev, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002549
Huang Rui03161a62017-04-13 16:12:26 +08002550unlock:
2551 if (fbcon)
2552 console_unlock();
2553
2554 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002555}
2556
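/*
 * Ask every valid IP block whether it is hung; the per-block result is
 * cached in ip_blocks[i].status.hang and used by the soft/full reset paths
 * below.
 */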
Chunming Zhou63fbf422016-07-15 11:19:20 +08002557static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2558{
2559 int i;
2560 bool asic_hang = false;
2561
2562 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002563 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002564 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002565 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2566 adev->ip_blocks[i].status.hang =
2567 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2568 if (adev->ip_blocks[i].status.hang) {
2569 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002570 asic_hang = true;
2571 }
2572 }
2573 return asic_hang;
2574}
2575
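/* Run each hung block's pre_soft_reset callback before the soft reset itself. */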
Baoyou Xie4d446652016-09-18 22:09:35 +08002576static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002577{
2578 int i, r = 0;
2579
2580 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002581 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002582 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002583 if (adev->ip_blocks[i].status.hang &&
2584 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2585 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002586 if (r)
2587 return r;
2588 }
2589 }
2590
2591 return 0;
2592}
2593
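/*
 * A hang in the GMC, SMC, ACP or DCE block is treated as requiring a full
 * ASIC reset rather than a per-block soft reset.
 */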
Chunming Zhou35d782f2016-07-15 15:57:13 +08002594static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2595{
Alex Deucherda146d32016-10-13 16:07:03 -04002596 int i;
2597
2598 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002599 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002600 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002601 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2602 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2603 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2604 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2605 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002606				DRM_INFO("Some blocks need full reset!\n");
2607 return true;
2608 }
2609 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002610 }
2611 return false;
2612}
2613
2614static int amdgpu_soft_reset(struct amdgpu_device *adev)
2615{
2616 int i, r = 0;
2617
2618 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002619 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002620 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002621 if (adev->ip_blocks[i].status.hang &&
2622 adev->ip_blocks[i].version->funcs->soft_reset) {
2623 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002624 if (r)
2625 return r;
2626 }
2627 }
2628
2629 return 0;
2630}
2631
2632static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2633{
2634 int i, r = 0;
2635
2636 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002637 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002638 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002639 if (adev->ip_blocks[i].status.hang &&
2640 adev->ip_blocks[i].version->funcs->post_soft_reset)
2641 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002642 if (r)
2643 return r;
2644 }
2645
2646 return 0;
2647}
2648
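/* VRAM shadow backups are only used on dGPUs, and only when a lockup timeout is configured. */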
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002649bool amdgpu_need_backup(struct amdgpu_device *adev)
2650{
2651 if (adev->flags & AMD_IS_APU)
2652 return false;
2653
2654 return amdgpu_lockup_timeout > 0 ? true : false;
2655}
2656
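/*
 * Restore a buffer's VRAM contents from its GTT shadow after a reset;
 * buffers that were already evicted from VRAM are skipped.
 */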
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002657static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2658 struct amdgpu_ring *ring,
2659 struct amdgpu_bo *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +01002660 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002661{
2662 uint32_t domain;
2663 int r;
2664
Roger.He23d2e502017-04-21 14:24:26 +08002665 if (!bo->shadow)
2666 return 0;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002667
Alex Xie1d284792017-04-24 13:53:04 -04002668 r = amdgpu_bo_reserve(bo, true);
Roger.He23d2e502017-04-21 14:24:26 +08002669 if (r)
2670 return r;
2671 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2672 /* if bo has been evicted, then no need to recover */
2673 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
Roger.He82521312017-04-21 13:08:43 +08002674 r = amdgpu_bo_validate(bo->shadow);
2675 if (r) {
2676 DRM_ERROR("bo validate failed!\n");
2677 goto err;
2678 }
2679
2680 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2681 if (r) {
2682 DRM_ERROR("%p bind failed\n", bo->shadow);
2683 goto err;
2684 }
2685
Roger.He23d2e502017-04-21 14:24:26 +08002686 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002687 NULL, fence, true);
Roger.He23d2e502017-04-21 14:24:26 +08002688 if (r) {
2689 DRM_ERROR("recover page table failed!\n");
2690 goto err;
2691 }
2692 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002693err:
Roger.He23d2e502017-04-21 14:24:26 +08002694 amdgpu_bo_unreserve(bo);
2695 return r;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002696}
2697
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002698/**
Monk Liua90ad3c2017-01-23 14:22:08 +08002699 * amdgpu_sriov_gpu_reset - reset the asic
2700 *
2701 * @adev: amdgpu device pointer
Monk Liu7225f872017-04-26 14:51:54 +08002702 * @job: the job that triggered the hang
Monk Liua90ad3c2017-01-23 14:22:08 +08002703 *
2704 * Attempt to reset the GPU if it has hung (all asics).
2705 * for SRIOV case.
2706 * Returns 0 for success or an error on failure.
2707 */
Monk Liu7225f872017-04-26 14:51:54 +08002708int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002709{
Monk Liu65781c72017-05-11 13:36:44 +08002710 int i, j, r = 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002711 int resched;
2712 struct amdgpu_bo *bo, *tmp;
2713 struct amdgpu_ring *ring;
2714 struct dma_fence *fence = NULL, *next = NULL;
2715
Monk Liu147b5982017-01-25 15:48:01 +08002716 mutex_lock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002717 atomic_inc(&adev->gpu_reset_counter);
Monk Liu1fb37a32017-01-26 15:36:37 +08002718 adev->gfx.in_reset = true;
Monk Liua90ad3c2017-01-23 14:22:08 +08002719
2720 /* block TTM */
2721 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2722
Monk Liu65781c72017-05-11 13:36:44 +08002723	/* we start from the ring that triggered the GPU hang */
2724 j = job ? job->ring->idx : 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002725
Monk Liu65781c72017-05-11 13:36:44 +08002726 /* block scheduler */
2727 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2728 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002729 if (!ring || !ring->sched.thread)
2730 continue;
2731
2732 kthread_park(ring->sched.thread);
Monk Liua90ad3c2017-01-23 14:22:08 +08002733
Monk Liu65781c72017-05-11 13:36:44 +08002734 if (job && j != i)
2735 continue;
2736
Monk Liu4f059ec2017-05-11 13:59:15 +08002737		/* one last check whether the job was already removed from the mirror list,
Monk Liu65781c72017-05-11 13:36:44 +08002738		 * since we have already paid the cost of kthread_park */
Monk Liu4f059ec2017-05-11 13:59:15 +08002739 if (job && list_empty(&job->base.node)) {
Monk Liu65781c72017-05-11 13:36:44 +08002740 kthread_unpark(ring->sched.thread);
2741 goto give_up_reset;
2742 }
2743
2744		if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2745 amd_sched_job_kickout(&job->base);
2746
2747		/* only do job_reset on the hung ring if @job is not NULL */
2748 amd_sched_hw_job_reset(&ring->sched);
2749
2750 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2751 amdgpu_fence_driver_force_completion_ring(ring);
2752 }
Monk Liua90ad3c2017-01-23 14:22:08 +08002753
2754 /* request to take full control of GPU before re-initialization */
Monk Liu7225f872017-04-26 14:51:54 +08002755 if (job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002756 amdgpu_virt_reset_gpu(adev);
2757 else
2758 amdgpu_virt_request_full_gpu(adev, true);
2759
2760
2761 /* Resume IP prior to SMC */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002762 amdgpu_sriov_reinit_early(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002763
2764	/* we need to recover the GART before resuming SMC/CP/SDMA */
2765 amdgpu_ttm_recover_gart(adev);
2766
2767 /* now we are okay to resume SMC/CP/SDMA */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002768 amdgpu_sriov_reinit_late(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002769
2770 amdgpu_irq_gpu_reset_resume_helper(adev);
2771
2772	r = amdgpu_ib_ring_tests(adev);
2773	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2774
2775 /* release full control of GPU after ib test */
2776 amdgpu_virt_release_full_gpu(adev, true);
2777
2778 DRM_INFO("recover vram bo from shadow\n");
2779
2780 ring = adev->mman.buffer_funcs_ring;
2781 mutex_lock(&adev->shadow_list_lock);
2782 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002783 next = NULL;
Monk Liua90ad3c2017-01-23 14:22:08 +08002784 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2785 if (fence) {
2786 r = dma_fence_wait(fence, false);
2787 if (r) {
2788 WARN(r, "recovery from shadow isn't completed\n");
2789 break;
2790 }
2791 }
2792
2793 dma_fence_put(fence);
2794 fence = next;
2795 }
2796 mutex_unlock(&adev->shadow_list_lock);
2797
2798 if (fence) {
2799 r = dma_fence_wait(fence, false);
2800 if (r)
 2801	WARN(r, "recovery from shadow isn't complete\n");
2802 }
2803 dma_fence_put(fence);
2804
Monk Liu65781c72017-05-11 13:36:44 +08002805 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2806 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002807 if (!ring || !ring->sched.thread)
2808 continue;
2809
Monk Liu65781c72017-05-11 13:36:44 +08002810 if (job && j != i) {
2811 kthread_unpark(ring->sched.thread);
2812 continue;
2813 }
2814
Monk Liua90ad3c2017-01-23 14:22:08 +08002815 amd_sched_job_recovery(&ring->sched);
2816 kthread_unpark(ring->sched.thread);
2817 }
2818
2819 drm_helper_resume_force_mode(adev->ddev);
Monk Liu65781c72017-05-11 13:36:44 +08002820give_up_reset:
Monk Liua90ad3c2017-01-23 14:22:08 +08002821 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2822 if (r) {
 2823	/* bad news, how do we tell userspace? */
2824 dev_info(adev->dev, "GPU reset failed\n");
Monk Liu65781c72017-05-11 13:36:44 +08002825 } else {
 2826	dev_info(adev->dev, "GPU reset succeeded!\n");
Monk Liua90ad3c2017-01-23 14:22:08 +08002827 }
2828
Monk Liu1fb37a32017-01-26 15:36:37 +08002829 adev->gfx.in_reset = false;
Monk Liu147b5982017-01-25 15:48:01 +08002830 mutex_unlock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002831 return r;
2832}
2833
2834/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002835 * amdgpu_gpu_reset - reset the asic
2836 *
2837 * @adev: amdgpu device pointer
2838 *
 2839 * Attempt to reset the GPU if it has hung (all asics).
2840 * Returns 0 for success or an error on failure.
2841 */
2842int amdgpu_gpu_reset(struct amdgpu_device *adev)
2843{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002844 int i, r;
2845 int resched;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002846 bool need_full_reset, vram_lost = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002847
Chunming Zhou63fbf422016-07-15 11:19:20 +08002848 if (!amdgpu_check_soft_reset(adev)) {
2849 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2850 return 0;
2851 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002852
Marek Olšákd94aed52015-05-05 21:13:49 +02002853 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002854
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002855 /* block TTM */
2856 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2857
Chunming Zhou0875dc92016-06-12 15:41:58 +08002858 /* block scheduler */
2859 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2860 struct amdgpu_ring *ring = adev->rings[i];
2861
Chunming Zhou51687752017-04-24 17:09:15 +08002862 if (!ring || !ring->sched.thread)
Chunming Zhou0875dc92016-06-12 15:41:58 +08002863 continue;
2864 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002865 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002866 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002867 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2868 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002869
Chunming Zhou35d782f2016-07-15 15:57:13 +08002870 need_full_reset = amdgpu_need_full_reset(adev);
2871
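	/* try a lightweight soft reset of the hung blocks first; fall back
	 * to a full ASIC reset if it fails or leaves blocks hung
	 */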
2872 if (!need_full_reset) {
2873 amdgpu_pre_soft_reset(adev);
2874 r = amdgpu_soft_reset(adev);
2875 amdgpu_post_soft_reset(adev);
2876 if (r || amdgpu_check_soft_reset(adev)) {
2877 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2878 need_full_reset = true;
2879 }
2880 }
2881
2882 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002883 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002884
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002885retry:
Alex Deucherd05da0e2017-06-30 17:08:45 -04002886 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002887 r = amdgpu_asic_reset(adev);
Alex Deucherd05da0e2017-06-30 17:08:45 -04002888 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002889 /* post card */
2890 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002891
Chunming Zhou35d782f2016-07-15 15:57:13 +08002892 if (!r) {
2893 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Chunming Zhoufcf06492017-05-05 10:33:33 +08002894 r = amdgpu_resume_phase1(adev);
2895 if (r)
2896 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002897 vram_lost = amdgpu_check_vram_lost(adev);
Chunming Zhouf1892132017-05-15 16:48:27 +08002898 if (vram_lost) {
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002899 DRM_ERROR("VRAM is lost!\n");
Chunming Zhouf1892132017-05-15 16:48:27 +08002900 atomic_inc(&adev->vram_lost_counter);
2901 }
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002902 r = amdgpu_ttm_recover_gart(adev);
2903 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002904 goto out;
2905 r = amdgpu_resume_phase2(adev);
2906 if (r)
2907 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002908 if (vram_lost)
2909 amdgpu_fill_reset_magic(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002910 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08002911 }
2912out:
2913 if (!r) {
2914 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002915 r = amdgpu_ib_ring_tests(adev);
2916 if (r) {
2917 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002918 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002919 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002920 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002921 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002922		/*
 2923		 * recover VM page tables, since we cannot rely on VRAM being
 2924		 * consistent after a full GPU reset.
 2925		 */
2926 if (need_full_reset && amdgpu_need_backup(adev)) {
2927 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2928 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002929 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002930
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002931 DRM_INFO("recover vram bo from shadow\n");
2932 mutex_lock(&adev->shadow_list_lock);
2933 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002934 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002935 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2936 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002937 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002938 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08002939					WARN(r, "recovery from shadow isn't complete\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002940 break;
2941 }
2942 }
2943
Chris Wilsonf54d1862016-10-25 13:00:45 +01002944 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002945 fence = next;
2946 }
2947 mutex_unlock(&adev->shadow_list_lock);
2948 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002949 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002950 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08002951				WARN(r, "recovery from shadow isn't complete\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002952 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002953 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002954 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002955 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2956 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08002957
2958 if (!ring || !ring->sched.thread)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002959 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002960
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002961 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002962 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002963 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002964 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002965 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Gavin Wan89041942017-06-23 13:55:15 -04002966 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002967 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou51687752017-04-24 17:09:15 +08002968 if (adev->rings[i] && adev->rings[i]->sched.thread) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002969 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002970 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002971 }
2972 }
2973
2974 drm_helper_resume_force_mode(adev->ddev);
2975
2976 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
Gavin Wan89041942017-06-23 13:55:15 -04002977 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002978		/* bad news, how do we tell userspace? */
2979 dev_info(adev->dev, "GPU reset failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002980 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 2981	} else {
Chunming Zhou6643be62017-05-05 10:50:09 +08002983		dev_info(adev->dev, "GPU reset succeeded!\n");
Gavin Wan89041942017-06-23 13:55:15 -04002984 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002985
Gavin Wan89041942017-06-23 13:55:15 -04002986 amdgpu_vf_error_trans_all(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002987 return r;
2988}
2989
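/*
 * amdgpu_get_pcie_info - fill in the PCIe gen and lane-width capability masks
 *
 * Honors the amdgpu_pcie_gen_cap / amdgpu_pcie_lane_cap module parameters
 * when set, uses safe defaults for devices on a root bus (e.g. APUs), and
 * otherwise derives the masks from the DRM PCIe speed/width helpers.
 */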
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002990void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2991{
2992 u32 mask;
2993 int ret;
2994
Alex Deuchercd474ba2016-02-04 10:21:23 -05002995 if (amdgpu_pcie_gen_cap)
2996 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2997
2998 if (amdgpu_pcie_lane_cap)
2999 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3000
3001 /* covers APUs as well */
3002 if (pci_is_root_bus(adev->pdev->bus)) {
3003 if (adev->pm.pcie_gen_mask == 0)
3004 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3005 if (adev->pm.pcie_mlw_mask == 0)
3006 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003007 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003008 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05003009
3010 if (adev->pm.pcie_gen_mask == 0) {
3011 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3012 if (!ret) {
3013 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3014 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3015 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3016
3017 if (mask & DRM_PCIE_SPEED_25)
3018 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3019 if (mask & DRM_PCIE_SPEED_50)
3020 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3021 if (mask & DRM_PCIE_SPEED_80)
3022 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3023 } else {
3024 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3025 }
3026 }
3027 if (adev->pm.pcie_mlw_mask == 0) {
3028 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3029 if (!ret) {
3030 switch (mask) {
3031 case 32:
3032 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3033 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3034 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3035 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3036 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3037 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3038 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3039 break;
3040 case 16:
3041 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3042 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3043 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3044 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3045 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3047 break;
3048 case 12:
3049 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3050 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3051 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3052 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3053 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3054 break;
3055 case 8:
3056 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3057 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3058 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3059 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3060 break;
3061 case 4:
3062 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3063 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3064 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3065 break;
3066 case 2:
3067 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3068 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3069 break;
3070 case 1:
3071 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3072 break;
3073 default:
3074 break;
3075 }
3076 } else {
3077 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003078 }
3079 }
3080}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003081
3082/*
3083 * Debugfs
3084 */
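/*
 * amdgpu_debugfs_add_files - register a drm_info_list with this device
 *
 * Duplicate registrations are ignored; the list is also remembered in
 * adev->debugfs[] so it can be looked up later.
 */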
3085int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04003086 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003087 unsigned nfiles)
3088{
3089 unsigned i;
3090
3091 for (i = 0; i < adev->debugfs_count; i++) {
3092 if (adev->debugfs[i].files == files) {
3093 /* Already registered */
3094 return 0;
3095 }
3096 }
3097
3098 i = adev->debugfs_count + 1;
3099 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3100 DRM_ERROR("Reached maximum number of debugfs components.\n");
 3101		DRM_ERROR("Report this so we can increase "
 3102			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3103 return -EINVAL;
3104 }
3105 adev->debugfs[adev->debugfs_count].files = files;
3106 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3107 adev->debugfs_count = i;
3108#if defined(CONFIG_DEBUG_FS)
3109 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003110 adev->ddev->primary->debugfs_root,
3111 adev->ddev->primary);
3112#endif
3113 return 0;
3114}
3115
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003116#if defined(CONFIG_DEBUG_FS)
3117
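/*
 * MMIO register access through the amdgpu_regs debugfs file.  The file
 * offset is the register offset in bytes; extra state is packed into the
 * upper bits of the offset before the access (layout as decoded below):
 *   bit  23     - take adev->pm.mutex around the access (power-gated regs)
 *   bit  62     - apply SE/SH/instance bank selection
 *   bits 24..33 - SE bank       (0x3FF selects all)
 *   bits 34..43 - SH bank       (0x3FF selects all)
 *   bits 44..53 - instance bank (0x3FF selects all)
 * Only bits 0..21 are kept as the actual register offset, so userspace can
 * simply pread()/pwrite() at a register's byte offset for plain accesses.
 */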
3118static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3119 size_t size, loff_t *pos)
3120{
Al Viro45063092016-12-04 18:24:56 -05003121 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003122 ssize_t result = 0;
3123 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04003124 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04003125 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003126
3127 if (size & 0x3 || *pos & 0x3)
3128 return -EINVAL;
3129
Tom St Denisbd122672016-07-28 09:39:22 -04003130 /* are we reading registers for which a PG lock is necessary? */
3131 pm_pg_lock = (*pos >> 23) & 1;
3132
Tom St Denis566281592016-06-27 11:55:07 -04003133 if (*pos & (1ULL << 62)) {
3134 se_bank = (*pos >> 24) & 0x3FF;
3135 sh_bank = (*pos >> 34) & 0x3FF;
3136 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04003137
3138 if (se_bank == 0x3FF)
3139 se_bank = 0xFFFFFFFF;
3140 if (sh_bank == 0x3FF)
3141 sh_bank = 0xFFFFFFFF;
3142 if (instance_bank == 0x3FF)
3143 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04003144 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04003145 } else {
3146 use_bank = 0;
3147 }
3148
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003149 *pos &= (1UL << 22) - 1;
Tom St Denisbd122672016-07-28 09:39:22 -04003150
Tom St Denis566281592016-06-27 11:55:07 -04003151 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04003152 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3153 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04003154 return -EINVAL;
3155 mutex_lock(&adev->grbm_idx_mutex);
3156 amdgpu_gfx_select_se_sh(adev, se_bank,
3157 sh_bank, instance_bank);
3158 }
3159
Tom St Denisbd122672016-07-28 09:39:22 -04003160 if (pm_pg_lock)
3161 mutex_lock(&adev->pm.mutex);
3162
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003163 while (size) {
3164 uint32_t value;
3165
3166 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04003167 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003168
3169 value = RREG32(*pos >> 2);
3170 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04003171 if (r) {
3172 result = r;
3173 goto end;
3174 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003175
3176 result += 4;
3177 buf += 4;
3178 *pos += 4;
3179 size -= 4;
3180 }
3181
Tom St Denis566281592016-06-27 11:55:07 -04003182end:
3183 if (use_bank) {
3184 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3185 mutex_unlock(&adev->grbm_idx_mutex);
3186 }
3187
Tom St Denisbd122672016-07-28 09:39:22 -04003188 if (pm_pg_lock)
3189 mutex_unlock(&adev->pm.mutex);
3190
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003191 return result;
3192}
3193
3194static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3195 size_t size, loff_t *pos)
3196{
Al Viro45063092016-12-04 18:24:56 -05003197 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003198 ssize_t result = 0;
3199 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04003200 bool pm_pg_lock, use_bank;
3201 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003202
3203 if (size & 0x3 || *pos & 0x3)
3204 return -EINVAL;
3205
Tom St Denis394fdde2016-10-10 07:31:23 -04003206	/* are we writing registers for which a PG lock is necessary? */
3207 pm_pg_lock = (*pos >> 23) & 1;
3208
3209 if (*pos & (1ULL << 62)) {
3210 se_bank = (*pos >> 24) & 0x3FF;
3211 sh_bank = (*pos >> 34) & 0x3FF;
3212 instance_bank = (*pos >> 44) & 0x3FF;
3213
3214 if (se_bank == 0x3FF)
3215 se_bank = 0xFFFFFFFF;
3216 if (sh_bank == 0x3FF)
3217 sh_bank = 0xFFFFFFFF;
3218 if (instance_bank == 0x3FF)
3219 instance_bank = 0xFFFFFFFF;
3220 use_bank = 1;
3221 } else {
3222 use_bank = 0;
3223 }
3224
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003225 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04003226
3227 if (use_bank) {
3228 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3229 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3230 return -EINVAL;
3231 mutex_lock(&adev->grbm_idx_mutex);
3232 amdgpu_gfx_select_se_sh(adev, se_bank,
3233 sh_bank, instance_bank);
3234 }
3235
3236 if (pm_pg_lock)
3237 mutex_lock(&adev->pm.mutex);
3238
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003239 while (size) {
3240 uint32_t value;
3241
3242 if (*pos > adev->rmmio_size)
3243 return result;
3244
3245 r = get_user(value, (uint32_t *)buf);
3246 if (r)
3247 return r;
3248
3249 WREG32(*pos >> 2, value);
3250
3251 result += 4;
3252 buf += 4;
3253 *pos += 4;
3254 size -= 4;
3255 }
3256
Tom St Denis394fdde2016-10-10 07:31:23 -04003257 if (use_bank) {
3258 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3259 mutex_unlock(&adev->grbm_idx_mutex);
3260 }
3261
3262 if (pm_pg_lock)
3263 mutex_unlock(&adev->pm.mutex);
3264
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003265 return result;
3266}
3267
Tom St Denisadcec282016-04-15 13:08:44 -04003268static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3269 size_t size, loff_t *pos)
3270{
Al Viro45063092016-12-04 18:24:56 -05003271 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003272 ssize_t result = 0;
3273 int r;
3274
3275 if (size & 0x3 || *pos & 0x3)
3276 return -EINVAL;
3277
3278 while (size) {
3279 uint32_t value;
3280
3281 value = RREG32_PCIE(*pos >> 2);
3282 r = put_user(value, (uint32_t *)buf);
3283 if (r)
3284 return r;
3285
3286 result += 4;
3287 buf += 4;
3288 *pos += 4;
3289 size -= 4;
3290 }
3291
3292 return result;
3293}
3294
3295static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3296 size_t size, loff_t *pos)
3297{
Al Viro45063092016-12-04 18:24:56 -05003298 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003299 ssize_t result = 0;
3300 int r;
3301
3302 if (size & 0x3 || *pos & 0x3)
3303 return -EINVAL;
3304
3305 while (size) {
3306 uint32_t value;
3307
3308 r = get_user(value, (uint32_t *)buf);
3309 if (r)
3310 return r;
3311
3312 WREG32_PCIE(*pos >> 2, value);
3313
3314 result += 4;
3315 buf += 4;
3316 *pos += 4;
3317 size -= 4;
3318 }
3319
3320 return result;
3321}
3322
3323static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3324 size_t size, loff_t *pos)
3325{
Al Viro45063092016-12-04 18:24:56 -05003326 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003327 ssize_t result = 0;
3328 int r;
3329
3330 if (size & 0x3 || *pos & 0x3)
3331 return -EINVAL;
3332
3333 while (size) {
3334 uint32_t value;
3335
3336 value = RREG32_DIDT(*pos >> 2);
3337 r = put_user(value, (uint32_t *)buf);
3338 if (r)
3339 return r;
3340
3341 result += 4;
3342 buf += 4;
3343 *pos += 4;
3344 size -= 4;
3345 }
3346
3347 return result;
3348}
3349
3350static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3351 size_t size, loff_t *pos)
3352{
Al Viro45063092016-12-04 18:24:56 -05003353 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003354 ssize_t result = 0;
3355 int r;
3356
3357 if (size & 0x3 || *pos & 0x3)
3358 return -EINVAL;
3359
3360 while (size) {
3361 uint32_t value;
3362
3363 r = get_user(value, (uint32_t *)buf);
3364 if (r)
3365 return r;
3366
3367 WREG32_DIDT(*pos >> 2, value);
3368
3369 result += 4;
3370 buf += 4;
3371 *pos += 4;
3372 size -= 4;
3373 }
3374
3375 return result;
3376}
3377
3378static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3379 size_t size, loff_t *pos)
3380{
Al Viro45063092016-12-04 18:24:56 -05003381 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003382 ssize_t result = 0;
3383 int r;
3384
3385 if (size & 0x3 || *pos & 0x3)
3386 return -EINVAL;
3387
3388 while (size) {
3389 uint32_t value;
3390
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003391 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04003392 r = put_user(value, (uint32_t *)buf);
3393 if (r)
3394 return r;
3395
3396 result += 4;
3397 buf += 4;
3398 *pos += 4;
3399 size -= 4;
3400 }
3401
3402 return result;
3403}
3404
3405static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3406 size_t size, loff_t *pos)
3407{
Al Viro45063092016-12-04 18:24:56 -05003408 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003409 ssize_t result = 0;
3410 int r;
3411
3412 if (size & 0x3 || *pos & 0x3)
3413 return -EINVAL;
3414
3415 while (size) {
3416 uint32_t value;
3417
3418 r = get_user(value, (uint32_t *)buf);
3419 if (r)
3420 return r;
3421
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003422 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04003423
3424 result += 4;
3425 buf += 4;
3426 *pos += 4;
3427 size -= 4;
3428 }
3429
3430 return result;
3431}
3432
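/*
 * amdgpu_gca_config debugfs file: dumps the GCA configuration as an array of
 * dwords.  The first dword is a format version; it is bumped whenever new
 * fields are appended so userspace can tell which fields are present.
 */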
Tom St Denis1e051412016-06-27 09:57:18 -04003433static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3434 size_t size, loff_t *pos)
3435{
Al Viro45063092016-12-04 18:24:56 -05003436 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04003437 ssize_t result = 0;
3438 int r;
3439 uint32_t *config, no_regs = 0;
3440
3441 if (size & 0x3 || *pos & 0x3)
3442 return -EINVAL;
3443
Markus Elfringecab7662016-09-18 17:00:52 +02003444 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04003445 if (!config)
3446 return -ENOMEM;
3447
3448 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05003449 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04003450 config[no_regs++] = adev->gfx.config.max_shader_engines;
3451 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3452 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3453 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3454 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3455 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3456 config[no_regs++] = adev->gfx.config.max_gprs;
3457 config[no_regs++] = adev->gfx.config.max_gs_threads;
3458 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3459 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3460 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3461 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3462 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3463 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3464 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3465 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3466 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3467 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3468 config[no_regs++] = adev->gfx.config.num_gpus;
3469 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3470 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3471 config[no_regs++] = adev->gfx.config.gb_addr_config;
3472 config[no_regs++] = adev->gfx.config.num_rbs;
3473
Tom St Denis89a8f302016-08-12 15:14:31 -04003474 /* rev==1 */
3475 config[no_regs++] = adev->rev_id;
3476 config[no_regs++] = adev->pg_flags;
3477 config[no_regs++] = adev->cg_flags;
3478
Tom St Denise9f11dc2016-08-17 12:00:51 -04003479 /* rev==2 */
3480 config[no_regs++] = adev->family;
3481 config[no_regs++] = adev->external_rev_id;
3482
Tom St Denis9a999352017-01-18 13:01:25 -05003483 /* rev==3 */
3484 config[no_regs++] = adev->pdev->device;
3485 config[no_regs++] = adev->pdev->revision;
3486 config[no_regs++] = adev->pdev->subsystem_device;
3487 config[no_regs++] = adev->pdev->subsystem_vendor;
3488
Tom St Denis1e051412016-06-27 09:57:18 -04003489 while (size && (*pos < no_regs * 4)) {
3490 uint32_t value;
3491
3492 value = config[*pos >> 2];
3493 r = put_user(value, (uint32_t *)buf);
3494 if (r) {
3495 kfree(config);
3496 return r;
3497 }
3498
3499 result += 4;
3500 buf += 4;
3501 *pos += 4;
3502 size -= 4;
3503 }
3504
3505 kfree(config);
3506 return result;
3507}
3508
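/*
 * amdgpu_sensors debugfs file: the file offset divided by four selects the
 * sensor index, which is forwarded to the powerplay/dpm read_sensor()
 * callback.
 */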
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003509static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3510 size_t size, loff_t *pos)
3511{
Al Viro45063092016-12-04 18:24:56 -05003512 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003513 int idx, x, outsize, r, valuesize;
3514 uint32_t values[16];
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003515
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003516 if (size & 3 || *pos & 0x3)
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003517 return -EINVAL;
3518
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003519 if (amdgpu_dpm == 0)
3520 return -EINVAL;
3521
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003522 /* convert offset to sensor number */
3523 idx = *pos >> 2;
3524
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003525 valuesize = sizeof(values);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003526 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003527 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003528 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3529 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3530 &valuesize);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003531 else
3532 return -EINVAL;
3533
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003534 if (size > valuesize)
3535 return -EINVAL;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003536
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003537 outsize = 0;
3538 x = 0;
3539 if (!r) {
3540 while (size) {
3541 r = put_user(values[x++], (int32_t *)buf);
3542 buf += 4;
3543 size -= 4;
3544 outsize += 4;
3545 }
3546 }
3547
3548 return !r ? outsize : r;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003549}
Tom St Denis1e051412016-06-27 09:57:18 -04003550
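/*
 * amdgpu_wave debugfs file: the SE/SH/CU/wave/SIMD to query are packed into
 * the file offset (see the decode below); the low bits select the dword
 * within the wave data returned by the gfx read_wave_data() callback.
 */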
Tom St Denis273d7aa2016-10-11 14:48:55 -04003551static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3552 size_t size, loff_t *pos)
3553{
3554 struct amdgpu_device *adev = f->f_inode->i_private;
3555 int r, x;
 3556	ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003557 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003558
3559 if (size & 3 || *pos & 3)
3560 return -EINVAL;
3561
3562 /* decode offset */
3563 offset = (*pos & 0x7F);
3564 se = ((*pos >> 7) & 0xFF);
3565 sh = ((*pos >> 15) & 0xFF);
3566 cu = ((*pos >> 23) & 0xFF);
3567 wave = ((*pos >> 31) & 0xFF);
3568 simd = ((*pos >> 37) & 0xFF);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003569
3570 /* switch to the specific se/sh/cu */
3571 mutex_lock(&adev->grbm_idx_mutex);
3572 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3573
3574 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003575 if (adev->gfx.funcs->read_wave_data)
3576 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003577
3578 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3579 mutex_unlock(&adev->grbm_idx_mutex);
3580
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003581 if (!x)
3582 return -EINVAL;
3583
Tom St Denis472259f2016-10-14 09:49:09 -04003584 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003585 uint32_t value;
3586
Tom St Denis472259f2016-10-14 09:49:09 -04003587 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003588 r = put_user(value, (uint32_t *)buf);
3589 if (r)
3590 return r;
3591
3592 result += 4;
3593 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003594 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003595 size -= 4;
3596 }
3597
3598 return result;
3599}
3600
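/*
 * amdgpu_gpr debugfs file: the file offset packs SE/SH/CU/wave/SIMD/thread
 * plus a bank bit (0 = VGPRs, 1 = SGPRs); the low bits give the starting
 * dword within the selected register file.
 */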
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003601static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3602 size_t size, loff_t *pos)
3603{
3604 struct amdgpu_device *adev = f->f_inode->i_private;
3605 int r;
3606 ssize_t result = 0;
3607 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3608
3609 if (size & 3 || *pos & 3)
3610 return -EINVAL;
3611
3612 /* decode offset */
3613 offset = (*pos & 0xFFF); /* in dwords */
3614 se = ((*pos >> 12) & 0xFF);
3615 sh = ((*pos >> 20) & 0xFF);
3616 cu = ((*pos >> 28) & 0xFF);
3617 wave = ((*pos >> 36) & 0xFF);
3618 simd = ((*pos >> 44) & 0xFF);
3619 thread = ((*pos >> 52) & 0xFF);
3620 bank = ((*pos >> 60) & 1);
3621
3622 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3623 if (!data)
3624 return -ENOMEM;
3625
3626 /* switch to the specific se/sh/cu */
3627 mutex_lock(&adev->grbm_idx_mutex);
3628 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3629
3630 if (bank == 0) {
3631 if (adev->gfx.funcs->read_wave_vgprs)
3632 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3633 } else {
3634 if (adev->gfx.funcs->read_wave_sgprs)
3635 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3636 }
3637
3638 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3639 mutex_unlock(&adev->grbm_idx_mutex);
3640
3641 while (size) {
3642 uint32_t value;
3643
3644 value = data[offset++];
3645 r = put_user(value, (uint32_t *)buf);
3646 if (r) {
3647 result = r;
3648 goto err;
3649 }
3650
3651 result += 4;
3652 buf += 4;
3653 size -= 4;
3654 }
3655
3656err:
3657 kfree(data);
3658 return result;
3659}
3660
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003661static const struct file_operations amdgpu_debugfs_regs_fops = {
3662 .owner = THIS_MODULE,
3663 .read = amdgpu_debugfs_regs_read,
3664 .write = amdgpu_debugfs_regs_write,
3665 .llseek = default_llseek
3666};
Tom St Denisadcec282016-04-15 13:08:44 -04003667static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3668 .owner = THIS_MODULE,
3669 .read = amdgpu_debugfs_regs_didt_read,
3670 .write = amdgpu_debugfs_regs_didt_write,
3671 .llseek = default_llseek
3672};
3673static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3674 .owner = THIS_MODULE,
3675 .read = amdgpu_debugfs_regs_pcie_read,
3676 .write = amdgpu_debugfs_regs_pcie_write,
3677 .llseek = default_llseek
3678};
3679static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3680 .owner = THIS_MODULE,
3681 .read = amdgpu_debugfs_regs_smc_read,
3682 .write = amdgpu_debugfs_regs_smc_write,
3683 .llseek = default_llseek
3684};
3685
Tom St Denis1e051412016-06-27 09:57:18 -04003686static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3687 .owner = THIS_MODULE,
3688 .read = amdgpu_debugfs_gca_config_read,
3689 .llseek = default_llseek
3690};
3691
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003692static const struct file_operations amdgpu_debugfs_sensors_fops = {
3693 .owner = THIS_MODULE,
3694 .read = amdgpu_debugfs_sensor_read,
3695 .llseek = default_llseek
3696};
3697
Tom St Denis273d7aa2016-10-11 14:48:55 -04003698static const struct file_operations amdgpu_debugfs_wave_fops = {
3699 .owner = THIS_MODULE,
3700 .read = amdgpu_debugfs_wave_read,
3701 .llseek = default_llseek
3702};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003703static const struct file_operations amdgpu_debugfs_gpr_fops = {
3704 .owner = THIS_MODULE,
3705 .read = amdgpu_debugfs_gpr_read,
3706 .llseek = default_llseek
3707};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003708
Tom St Denisadcec282016-04-15 13:08:44 -04003709static const struct file_operations *debugfs_regs[] = {
3710 &amdgpu_debugfs_regs_fops,
3711 &amdgpu_debugfs_regs_didt_fops,
3712 &amdgpu_debugfs_regs_pcie_fops,
3713 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003714 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003715 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003716 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003717 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003718};
3719
3720static const char *debugfs_regs_names[] = {
3721 "amdgpu_regs",
3722 "amdgpu_regs_didt",
3723 "amdgpu_regs_pcie",
3724 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003725 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003726 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003727 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003728 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003729};
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003730
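/*
 * Create the debugfs files listed in debugfs_regs_names[].  The first file
 * (amdgpu_regs) is sized to the MMIO aperture so userspace tools can seek
 * within it.
 */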
3731static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3732{
3733 struct drm_minor *minor = adev->ddev->primary;
3734 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003735 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003736
Tom St Denisadcec282016-04-15 13:08:44 -04003737 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3738 ent = debugfs_create_file(debugfs_regs_names[i],
3739 S_IFREG | S_IRUGO, root,
3740 adev, debugfs_regs[i]);
3741 if (IS_ERR(ent)) {
3742 for (j = 0; j < i; j++) {
 3743			debugfs_remove(adev->debugfs_regs[j]);
 3744			adev->debugfs_regs[j] = NULL;
3745 }
3746 return PTR_ERR(ent);
3747 }
3748
3749 if (!i)
3750 i_size_write(ent->d_inode, adev->rmmio_size);
3751 adev->debugfs_regs[i] = ent;
3752 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003753
3754 return 0;
3755}
3756
3757static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3758{
Tom St Denisadcec282016-04-15 13:08:44 -04003759 unsigned i;
3760
3761 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3762 if (adev->debugfs_regs[i]) {
3763 debugfs_remove(adev->debugfs_regs[i]);
3764 adev->debugfs_regs[i] = NULL;
3765 }
3766 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003767}
3768
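/*
 * amdgpu_test_ib debugfs file: parks every scheduler thread so nothing new
 * is submitted, runs amdgpu_ib_ring_tests() on all rings, prints the result
 * to the seq_file and unparks the schedulers again.
 */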
Huang Rui4f0955f2017-05-10 23:04:06 +08003769static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3770{
3771 struct drm_info_node *node = (struct drm_info_node *) m->private;
3772 struct drm_device *dev = node->minor->dev;
3773 struct amdgpu_device *adev = dev->dev_private;
3774 int r = 0, i;
3775
3776 /* hold on the scheduler */
3777 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3778 struct amdgpu_ring *ring = adev->rings[i];
3779
3780 if (!ring || !ring->sched.thread)
3781 continue;
3782 kthread_park(ring->sched.thread);
3783 }
3784
3785 seq_printf(m, "run ib test:\n");
3786 r = amdgpu_ib_ring_tests(adev);
3787 if (r)
3788 seq_printf(m, "ib ring tests failed (%d).\n", r);
3789 else
3790 seq_printf(m, "ib ring tests passed.\n");
3791
3792 /* go on the scheduler */
3793 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3794 struct amdgpu_ring *ring = adev->rings[i];
3795
3796 if (!ring || !ring->sched.thread)
3797 continue;
3798 kthread_unpark(ring->sched.thread);
3799 }
3800
3801 return 0;
3802}
3803
3804static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3805 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3806};
3807
3808static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3809{
3810 return amdgpu_debugfs_add_files(adev,
3811 amdgpu_debugfs_test_ib_ring_list, 1);
3812}
3813
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003814int amdgpu_debugfs_init(struct drm_minor *minor)
3815{
3816 return 0;
3817}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003818#else
Arnd Bergmann27bad5b2017-06-21 23:51:02 +02003819static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
Huang Rui4f0955f2017-05-10 23:04:06 +08003820{
3821 return 0;
3822}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003823static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3824{
3825 return 0;
3826}
3827static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003828#endif