Alex Deucherd38ceaf2015-04-20 16:55:21 -04001/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
Chunming Zhou0875dc92016-06-12 15:41:58 +080028#include <linux/kthread.h>
Alex Deucherd38ceaf2015-04-20 16:55:21 -040029#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
Tom St Denisf4b373f2016-05-31 08:02:27 -040039#include "amdgpu_trace.h"
Alex Deucherd38ceaf2015-04-20 16:55:21 -040040#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
Alex Deuchera5bde2f2016-09-23 16:23:41 -040043#include "amdgpu_atomfirmware.h"
Alex Deucherd0dd7f02015-11-11 19:45:06 -050044#include "amd_pcie.h"
Ken Wang33f34802016-01-21 17:29:41 +080045#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
Alex Deuchera2e73f52015-04-20 17:09:27 -040048#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
Alex Deucheraaa36a92015-04-20 17:31:14 -040051#include "vi.h"
Ken Wang460826e2017-03-06 14:53:16 -050052#include "soc15.h"
Alex Deucherd38ceaf2015-04-20 16:55:21 -040053#include "bif/bif_4_1_d.h"
Emily Deng9accf2f2016-08-10 16:01:25 +080054#include <linux/pci.h>
Monk Liubec86372016-09-14 19:38:08 +080055#include <linux/firmware.h>
Gavin Wan89041942017-06-23 13:55:15 -040056#include "amdgpu_vf_error.h"
Alex Deucherd38ceaf2015-04-20 16:55:21 -040057
Yong Zhaoba997702015-11-09 17:21:45 -050058#include "amdgpu_amdkfd.h"
Rex Zhud2f52ac2017-09-22 17:47:27 +080059#include "amdgpu_pm.h"
Alex Deucherd38ceaf2015-04-20 16:55:21 -040060
Alex Deuchere2a75f82017-04-27 16:58:01 -040061MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
Alex Deucher2d2e5e72017-05-09 12:27:35 -040062MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
Alex Deuchere2a75f82017-04-27 16:58:01 -040063
Shirish S2dc80b02017-05-25 10:05:25 +053064#define AMDGPU_RESUME_MS 2000
65
Alex Deucherd38ceaf2015-04-20 16:55:21 -040066static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
67static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
Huang Rui4f0955f2017-05-10 23:04:06 +080068static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
Kent Russelldb95e212017-08-22 12:31:43 -040069static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -040070
71static const char *amdgpu_asic_name[] = {
Ken Wangda69c1612016-01-21 19:08:55 +080072 "TAHITI",
73 "PITCAIRN",
74 "VERDE",
75 "OLAND",
76 "HAINAN",
Alex Deucherd38ceaf2015-04-20 16:55:21 -040077 "BONAIRE",
78 "KAVERI",
79 "KABINI",
80 "HAWAII",
81 "MULLINS",
82 "TOPAZ",
83 "TONGA",
David Zhang48299f92015-07-08 01:05:16 +080084 "FIJI",
Alex Deucherd38ceaf2015-04-20 16:55:21 -040085 "CARRIZO",
Samuel Li139f4912015-10-08 14:50:27 -040086 "STONEY",
Flora Cui2cc0c0b2016-03-14 18:33:29 -040087 "POLARIS10",
88 "POLARIS11",
Junwei Zhangc4642a42016-12-14 15:32:28 -050089 "POLARIS12",
Ken Wangd4196f02016-03-09 09:28:32 +080090 "VEGA10",
Chunming Zhou2ca8a5d2016-12-07 17:31:19 +080091 "RAVEN",
Alex Deucherd38ceaf2015-04-20 16:55:21 -040092 "LAST",
93};
94
95bool amdgpu_device_is_px(struct drm_device *dev)
96{
97 struct amdgpu_device *adev = dev->dev_private;
98
Jammy Zhou2f7d10b2015-07-22 11:29:01 +080099 if (adev->flags & AMD_IS_PX)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400100 return true;
101 return false;
102}
103
104/*
105 * MMIO register access helper functions.
106 */
107uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
Monk Liu15d72fd2017-01-25 15:07:40 +0800108 uint32_t acc_flags)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400109{
Tom St Denisf4b373f2016-05-31 08:02:27 -0400110 uint32_t ret;
111
Monk Liu15d72fd2017-01-25 15:07:40 +0800112 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
Xiangliang Yubc992ba2017-01-12 14:29:34 +0800113 BUG_ON(in_interrupt());
114 return amdgpu_virt_kiq_rreg(adev, reg);
115 }
116
Monk Liu15d72fd2017-01-25 15:07:40 +0800117 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
Tom St Denisf4b373f2016-05-31 08:02:27 -0400118 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400119 else {
120 unsigned long flags;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400121
122 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
123 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
124 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
125 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400126 }
Tom St Denisf4b373f2016-05-31 08:02:27 -0400127 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
128 return ret;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400129}
130
131void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
Monk Liu15d72fd2017-01-25 15:07:40 +0800132 uint32_t acc_flags)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400133{
Tom St Denisf4b373f2016-05-31 08:02:27 -0400134 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
Monk Liu4e99a442016-03-31 13:26:59 +0800135
Ken Wang47ed4e12017-07-04 13:11:52 +0800136 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
137 adev->last_mm_index = v;
138 }
139
Monk Liu15d72fd2017-01-25 15:07:40 +0800140 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
Xiangliang Yubc992ba2017-01-12 14:29:34 +0800141 BUG_ON(in_interrupt());
142 return amdgpu_virt_kiq_wreg(adev, reg, v);
143 }
144
Monk Liu15d72fd2017-01-25 15:07:40 +0800145 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400146 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
147 else {
148 unsigned long flags;
149
150 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
151 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
152 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
153 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
154 }
Ken Wang47ed4e12017-07-04 13:11:52 +0800155
156 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
157 udelay(500);
158 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400159}
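
/*
 * Illustrative sketch (not part of the driver): the helpers above are
 * normally reached through the RREG32()/WREG32()-style macros in amdgpu.h.
 * A read-modify-write of a register, using a hypothetical SOME_REG offset
 * and SOME_MASK bit purely for illustration, would look roughly like:
 *
 *	u32 tmp = RREG32(SOME_REG);
 *	tmp |= SOME_MASK;
 *	WREG32(SOME_REG, tmp);
 *
 * Registers whose byte offset lies beyond rmmio_size take the indirect
 * mmMM_INDEX/mmMM_DATA path under mmio_idx_lock, as implemented above.
 */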
160
161u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
162{
163 if ((reg * 4) < adev->rio_mem_size)
164 return ioread32(adev->rio_mem + (reg * 4));
165 else {
166 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
167 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
168 }
169}
170
171void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
172{
Ken Wang47ed4e12017-07-04 13:11:52 +0800173 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
174 adev->last_mm_index = v;
175 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400176
177 if ((reg * 4) < adev->rio_mem_size)
178 iowrite32(v, adev->rio_mem + (reg * 4));
179 else {
180 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
181 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
182 }
Ken Wang47ed4e12017-07-04 13:11:52 +0800183
184 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
185 udelay(500);
186 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400187}
188
189/**
190 * amdgpu_mm_rdoorbell - read a doorbell dword
191 *
192 * @adev: amdgpu_device pointer
193 * @index: doorbell index
194 *
195 * Returns the value in the doorbell aperture at the
196 * requested doorbell index (CIK).
197 */
198u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
199{
200 if (index < adev->doorbell.num_doorbells) {
201 return readl(adev->doorbell.ptr + index);
202 } else {
203 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
204 return 0;
205 }
206}
207
208/**
209 * amdgpu_mm_wdoorbell - write a doorbell dword
210 *
211 * @adev: amdgpu_device pointer
212 * @index: doorbell index
213 * @v: value to write
214 *
215 * Writes @v to the doorbell aperture at the
216 * requested doorbell index (CIK).
217 */
218void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
219{
220 if (index < adev->doorbell.num_doorbells) {
221 writel(v, adev->doorbell.ptr + index);
222 } else {
223 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
224 }
225}
226
227/**
Ken Wang832be402016-03-18 15:23:08 +0800228 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
229 *
230 * @adev: amdgpu_device pointer
231 * @index: doorbell index
232 *
233 * Returns the value in the doorbell aperture at the
234 * requested doorbell index (VEGA10+).
235 */
236u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
237{
238 if (index < adev->doorbell.num_doorbells) {
239 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
240 } else {
241 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
242 return 0;
243 }
244}
245
246/**
247 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
248 *
249 * @adev: amdgpu_device pointer
250 * @index: doorbell index
251 * @v: value to write
252 *
253 * Writes @v to the doorbell aperture at the
254 * requested doorbell index (VEGA10+).
255 */
256void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
257{
258 if (index < adev->doorbell.num_doorbells) {
259 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
260 } else {
261 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
262 }
263}
264
265/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400266 * amdgpu_invalid_rreg - dummy reg read function
267 *
268 * @adev: amdgpu device pointer
269 * @reg: offset of register
270 *
271 * Dummy register read function. Used for register blocks
272 * that certain asics don't have (all asics).
273 * Returns the value in the register.
274 */
275static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
276{
277 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
278 BUG();
279 return 0;
280}
281
282/**
283 * amdgpu_invalid_wreg - dummy reg write function
284 *
285 * @adev: amdgpu device pointer
286 * @reg: offset of register
287 * @v: value to write to the register
288 *
289 * Dummy register write function. Used for register blocks
290 * that certain asics don't have (all asics).
291 */
292static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
293{
294 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
295 reg, v);
296 BUG();
297}
298
299/**
300 * amdgpu_block_invalid_rreg - dummy reg read function
301 *
302 * @adev: amdgpu device pointer
303 * @block: offset of instance
304 * @reg: offset of register
305 *
306 * Dummy register read function. Used for register blocks
307 * that certain asics don't have (all asics).
308 * Returns the value in the register.
309 */
310static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
311 uint32_t block, uint32_t reg)
312{
313 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
314 reg, block);
315 BUG();
316 return 0;
317}
318
319/**
320 * amdgpu_block_invalid_wreg - dummy reg write function
321 *
322 * @adev: amdgpu device pointer
323 * @block: offset of instance
324 * @reg: offset of register
325 * @v: value to write to the register
326 *
327 * Dummy register write function. Used for register blocks
328 * that certain asics don't have (all asics).
329 */
330static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
331 uint32_t block,
332 uint32_t reg, uint32_t v)
333{
334 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
335 reg, block, v);
336 BUG();
337}
338
339static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
340{
Christian Königa4a02772017-07-27 17:24:36 +0200341 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
342 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
343 &adev->vram_scratch.robj,
344 &adev->vram_scratch.gpu_addr,
345 (void **)&adev->vram_scratch.ptr);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400346}
347
348static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
349{
Christian König078af1a2017-07-27 17:43:00 +0200350 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400351}
352
353/**
354 * amdgpu_program_register_sequence - program an array of registers.
355 *
356 * @adev: amdgpu_device pointer
357 * @registers: pointer to the register array
358 * @array_size: size of the register array
359 *
360 * Programs an array of registers with AND and OR masks.
361 * This is a helper for setting golden registers.
362 */
363void amdgpu_program_register_sequence(struct amdgpu_device *adev,
364 const u32 *registers,
365 const u32 array_size)
366{
367 u32 tmp, reg, and_mask, or_mask;
368 int i;
369
370 if (array_size % 3)
371 return;
372
373 for (i = 0; i < array_size; i +=3) {
374 reg = registers[i + 0];
375 and_mask = registers[i + 1];
376 or_mask = registers[i + 2];
377
378 if (and_mask == 0xffffffff) {
379 tmp = or_mask;
380 } else {
381 tmp = RREG32(reg);
382 tmp &= ~and_mask;
383 tmp |= or_mask;
384 }
385 WREG32(reg, tmp);
386 }
387}
388
389void amdgpu_pci_config_reset(struct amdgpu_device *adev)
390{
391 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
392}
393
394/*
395 * GPU doorbell aperture helpers function.
396 */
397/**
398 * amdgpu_doorbell_init - Init doorbell driver information.
399 *
400 * @adev: amdgpu_device pointer
401 *
402 * Init doorbell driver information (CIK)
403 * Returns 0 on success, error on failure.
404 */
405static int amdgpu_doorbell_init(struct amdgpu_device *adev)
406{
Christian König705e5192017-06-08 11:15:16 +0200407 /* No doorbell on SI hardware generation */
408 if (adev->asic_type < CHIP_BONAIRE) {
409 adev->doorbell.base = 0;
410 adev->doorbell.size = 0;
411 adev->doorbell.num_doorbells = 0;
412 adev->doorbell.ptr = NULL;
413 return 0;
414 }
415
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400416 /* doorbell bar mapping */
417 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
418 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
419
Christian Königedf600d2016-05-03 15:54:54 +0200420 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400421 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
422 if (adev->doorbell.num_doorbells == 0)
423 return -EINVAL;
424
Christian König8972e5d2017-03-06 13:34:57 +0100425 adev->doorbell.ptr = ioremap(adev->doorbell.base,
426 adev->doorbell.num_doorbells *
427 sizeof(u32));
428 if (adev->doorbell.ptr == NULL)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400429 return -ENOMEM;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400430
431 return 0;
432}
433
434/**
435 * amdgpu_doorbell_fini - Tear down doorbell driver information.
436 *
437 * @adev: amdgpu_device pointer
438 *
439 * Tear down doorbell driver information (CIK)
440 */
441static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
442{
443 iounmap(adev->doorbell.ptr);
444 adev->doorbell.ptr = NULL;
445}
446
447/**
448 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
449 * setup amdkfd
450 *
451 * @adev: amdgpu_device pointer
452 * @aperture_base: output returning doorbell aperture base physical address
453 * @aperture_size: output returning doorbell aperture size in bytes
454 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
455 *
456 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
457 * takes doorbells required for its own rings and reports the setup to amdkfd.
458 * amdgpu reserved doorbells are at the start of the doorbell aperture.
459 */
460void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
461 phys_addr_t *aperture_base,
462 size_t *aperture_size,
463 size_t *start_offset)
464{
465 /*
466 * The first num_doorbells are used by amdgpu.
467 * amdkfd takes whatever's left in the aperture.
468 */
469 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
470 *aperture_base = adev->doorbell.base;
471 *aperture_size = adev->doorbell.size;
472 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
473 } else {
474 *aperture_base = 0;
475 *aperture_size = 0;
476 *start_offset = 0;
477 }
478}
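
/*
 * Illustrative sketch (not part of the driver): assuming a hypothetical 8MB
 * doorbell BAR of which amdgpu keeps the first 1024 32-bit doorbells for its
 * own rings, the helper above would report to amdkfd:
 *
 *	aperture_base = adev->doorbell.base;	// start of BAR 2
 *	aperture_size = 8 * 1024 * 1024;	// full BAR size
 *	start_offset  = 1024 * sizeof(u32);	// first byte available to amdkfd
 *
 * i.e. amdkfd may use the range [start_offset, aperture_size) of the BAR.
 */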
479
480/*
481 * amdgpu_wb_*()
Alex Xie455a7bc2017-05-08 21:36:03 -0400482 * Writeback is the method by which the GPU updates special pages in memory
Alex Xieea81a172017-05-08 13:41:11 -0400483 * with the status of certain GPU events (fences, ring pointers, etc.).
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400484 */
485
486/**
487 * amdgpu_wb_fini - Disable Writeback and free memory
488 *
489 * @adev: amdgpu_device pointer
490 *
491 * Disables Writeback and frees the Writeback memory (all asics).
492 * Used at driver shutdown.
493 */
494static void amdgpu_wb_fini(struct amdgpu_device *adev)
495{
496 if (adev->wb.wb_obj) {
Alex Deuchera76ed482016-10-21 15:30:36 -0400497 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
498 &adev->wb.gpu_addr,
499 (void **)&adev->wb.wb);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400500 adev->wb.wb_obj = NULL;
501 }
502}
503
504/**
505 * amdgpu_wb_init- Init Writeback driver info and allocate memory
506 *
507 * @adev: amdgpu_device pointer
508 *
Alex Xie455a7bc2017-05-08 21:36:03 -0400509 * Initializes writeback and allocates writeback memory (all asics).
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400510 * Used at driver startup.
511 * Returns 0 on success or a negative error code on failure.
512 */
513static int amdgpu_wb_init(struct amdgpu_device *adev)
514{
515 int r;
516
517 if (adev->wb.wb_obj == NULL) {
Alex Deucher97407b62017-07-28 12:14:15 -0400518 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
519 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
Alex Deuchera76ed482016-10-21 15:30:36 -0400520 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
521 &adev->wb.wb_obj, &adev->wb.gpu_addr,
522 (void **)&adev->wb.wb);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400523 if (r) {
524 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
525 return r;
526 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400527
528 adev->wb.num_wb = AMDGPU_MAX_WB;
529 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
530
531 /* clear wb memory */
Huang Rui60a970a62017-03-15 10:13:32 +0800532 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400533 }
534
535 return 0;
536}
537
538/**
539 * amdgpu_wb_get - Allocate a wb entry
540 *
541 * @adev: amdgpu_device pointer
542 * @wb: wb index
543 *
544 * Allocate a wb slot for use by the driver (all asics).
545 * Returns 0 on success or -EINVAL on failure.
546 */
547int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
548{
549 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
Alex Deucher97407b62017-07-28 12:14:15 -0400550
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400551 if (offset < adev->wb.num_wb) {
552 __set_bit(offset, adev->wb.used);
Alex Deucher97407b62017-07-28 12:14:15 -0400553 *wb = offset * 8; /* convert to dw offset */
Monk Liu0915fdb2017-06-19 10:19:41 -0400554 return 0;
555 } else {
556 return -EINVAL;
557 }
558}
559
Ken Wang70142852016-03-18 15:08:49 +0800560/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400561 * amdgpu_wb_free - Free a wb entry
562 *
563 * @adev: amdgpu_device pointer
564 * @wb: wb index
565 *
566 * Free a wb slot allocated for use by the driver (all asics)
567 */
568void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
569{
570 if (wb < adev->wb.num_wb)
571 __clear_bit(wb, adev->wb.used);
572}
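
/*
 * Illustrative sketch (not part of the driver): a ring or IP block that
 * needs a writeback slot typically pairs the two helpers above like this
 * (error handling trimmed):
 *
 *	u32 wb;
 *
 *	if (amdgpu_wb_get(adev, &wb))
 *		return -EINVAL;
 *	// CPU view of the slot:    &adev->wb.wb[wb]
 *	// GPU address of the slot: adev->wb.gpu_addr + wb * 4
 *	...
 *	amdgpu_wb_free(adev, wb);
 *
 * The returned index is already a dword offset (offset * 8 above), so each
 * allocation occupies a full 256-bit slot.
 */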
573
574/**
575 * amdgpu_vram_location - try to find VRAM location
576 * @adev: amdgpu device structure holding all necessary information
577 * @mc: memory controller structure holding memory information
578 * @base: base address at which to put VRAM
579 *
Alex Xie455a7bc2017-05-08 21:36:03 -0400580 * Function will try to place VRAM at base address provided
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400581 * as parameter (which is so far either PCI aperture address or
582 * for IGP TOM base address).
583 *
584 * If there is not enough space to fit the invisible VRAM in the 32-bit
585 * address space, then we limit the VRAM size to the aperture.
586 *
587 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
588 * this shouldn't be a problem as we are using the PCI aperture as a reference.
589 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
590 * not IGP.
591 *
592 * Note: we use mc_vram_size because on some boards we need to program the MC to
593 * cover the whole aperture even if the VRAM size is smaller than the aperture size
594 * (Novell bug 204882, along with lots of Ubuntu ones).
595 *
596 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
597 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
598 * not affected by the bogus hw of Novell bug 204882 and the many Ubuntu
599 * ones).
600 *
601 * Note: IGP TOM addr should be the same as the aperture addr, we don't
Alex Xie455a7bc2017-05-08 21:36:03 -0400602 * explicitly check for that though.
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400603 *
604 * FIXME: when reducing VRAM size align new size on power of 2.
605 */
606void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
607{
608 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
609
610 mc->vram_start = base;
611 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
612 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
613 mc->real_vram_size = mc->aper_size;
614 mc->mc_vram_size = mc->aper_size;
615 }
616 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
617 if (limit && limit < mc->real_vram_size)
618 mc->real_vram_size = limit;
619 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
620 mc->mc_vram_size >> 20, mc->vram_start,
621 mc->vram_end, mc->real_vram_size >> 20);
622}
623
624/**
Christian König6f02a692017-07-07 11:56:59 +0200625 * amdgpu_gart_location - try to find GTT location
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400626 * @adev: amdgpu device structure holding all necessary information
627 * @mc: memory controller structure holding memory information
628 *
629 * Function will try to place GTT before or after VRAM.
630 *
631 * If the GTT size is bigger than the space left, then we adjust the GTT size.
632 * Thus this function will never fail.
633 *
634 * FIXME: when reducing GTT size align new size on power of 2.
635 */
Christian König6f02a692017-07-07 11:56:59 +0200636void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400637{
638 u64 size_af, size_bf;
639
Christian Königed21c042017-07-06 22:26:05 +0200640 size_af = adev->mc.mc_mask - mc->vram_end;
641 size_bf = mc->vram_start;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400642 if (size_bf > size_af) {
Christian König6f02a692017-07-07 11:56:59 +0200643 if (mc->gart_size > size_bf) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400644 dev_warn(adev->dev, "limiting GTT\n");
Christian König6f02a692017-07-07 11:56:59 +0200645 mc->gart_size = size_bf;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400646 }
Christian König6f02a692017-07-07 11:56:59 +0200647 mc->gart_start = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400648 } else {
Christian König6f02a692017-07-07 11:56:59 +0200649 if (mc->gart_size > size_af) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400650 dev_warn(adev->dev, "limiting GTT\n");
Christian König6f02a692017-07-07 11:56:59 +0200651 mc->gart_size = size_af;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400652 }
Christian König6f02a692017-07-07 11:56:59 +0200653 mc->gart_start = mc->vram_end + 1;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400654 }
Christian König6f02a692017-07-07 11:56:59 +0200655 mc->gart_end = mc->gart_start + mc->gart_size - 1;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400656 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
Christian König6f02a692017-07-07 11:56:59 +0200657 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400658}
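
/*
 * Illustrative sketch (not part of the driver): assuming a hypothetical
 * 40-bit mc_mask, 8GB of VRAM placed at base 0 and a 1GB GART request,
 * the two placement helpers above end up with:
 *
 *	vram_start = 0x0;           vram_end = 0x1ffffffff   (8GB)
 *	size_bf    = 0              (no room before VRAM)
 *	size_af    = mc_mask - vram_end
 *	gart_start = vram_end + 1 = 0x200000000
 *	gart_end   = gart_start + 1GB - 1
 *
 * If neither region can hold the requested GART size, the size is simply
 * clamped ("limiting GTT") rather than the function failing.
 */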
659
660/*
Horace Chena05502e2017-09-29 14:41:57 +0800661 * Firmware Reservation functions
662 */
663/**
664 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
665 *
666 * @adev: amdgpu_device pointer
667 *
668 * free fw reserved vram if it has been reserved.
669 */
670void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
671{
672 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
673 NULL, &adev->fw_vram_usage.va);
674}
675
676/**
677 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
678 *
679 * @adev: amdgpu_device pointer
680 *
681 * Create a buffer object for the VRAM region reserved by the firmware.
682 */
683int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
684{
685 int r = 0;
686 u64 gpu_addr;
687 u64 vram_size = adev->mc.visible_vram_size;
688
689 adev->fw_vram_usage.va = NULL;
690 adev->fw_vram_usage.reserved_bo = NULL;
691
692 if (adev->fw_vram_usage.size > 0 &&
693 adev->fw_vram_usage.size <= vram_size) {
694
695 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
696 PAGE_SIZE, true, 0,
697 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
698 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
699 &adev->fw_vram_usage.reserved_bo);
700 if (r)
701 goto error_create;
702
703 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
704 if (r)
705 goto error_reserve;
706 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
707 AMDGPU_GEM_DOMAIN_VRAM,
708 adev->fw_vram_usage.start_offset,
709 (adev->fw_vram_usage.start_offset +
710 adev->fw_vram_usage.size), &gpu_addr);
711 if (r)
712 goto error_pin;
713 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
714 &adev->fw_vram_usage.va);
715 if (r)
716 goto error_kmap;
717
718 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
719 }
720 return r;
721
722error_kmap:
723 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
724error_pin:
725 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
726error_reserve:
727 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
728error_create:
729 adev->fw_vram_usage.va = NULL;
730 adev->fw_vram_usage.reserved_bo = NULL;
731 return r;
732}
733
734
735/*
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400736 * GPU helpers function.
737 */
738/**
Jim Quc836fec2017-02-10 15:59:59 +0800739 * amdgpu_need_post - check if the hw needs a post or not
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400740 *
741 * @adev: amdgpu_device pointer
742 *
Jim Quc836fec2017-02-10 15:59:59 +0800743 * Check if the asic has been initialized (all asics) at driver startup,
744 * or if a post is needed because a hw reset was performed.
745 * Returns true if a post is needed, false if not.
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400746 */
Jim Quc836fec2017-02-10 15:59:59 +0800747bool amdgpu_need_post(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400748{
749 uint32_t reg;
750
Jim Quc836fec2017-02-10 15:59:59 +0800751 if (adev->has_hw_reset) {
752 adev->has_hw_reset = false;
753 return true;
754 }
Alex Deucher70d17a22017-06-30 17:26:47 -0400755
756 /* bios scratch used on CIK+ */
757 if (adev->asic_type >= CHIP_BONAIRE)
758 return amdgpu_atombios_scratch_need_asic_init(adev);
759
760 /* check MEM_SIZE for older asics */
Alex Deucherbbf282d2017-03-03 17:26:10 -0500761 reg = amdgpu_asic_get_config_memsize(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400762
Alex Deucherf2713e82017-03-28 12:19:31 -0400763 if ((reg != 0) && (reg != 0xffffffff))
Jim Quc836fec2017-02-10 15:59:59 +0800764 return false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400765
Jim Quc836fec2017-02-10 15:59:59 +0800766 return true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400767
768}
769
Monk Liubec86372016-09-14 19:38:08 +0800770static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
771{
772 if (amdgpu_sriov_vf(adev))
773 return false;
774
775 if (amdgpu_passthrough(adev)) {
Monk Liu1da2c322016-11-11 11:24:29 +0800776 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM reboot
777 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU
778 * hangs; SMC firmware versions above 22.15 don't have this flaw, so we force
779 * a vPost only for SMC versions below 22.15
Monk Liubec86372016-09-14 19:38:08 +0800780 */
781 if (adev->asic_type == CHIP_FIJI) {
782 int err;
783 uint32_t fw_ver;
784 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
785 /* force vPost if an error occurred */
786 if (err)
787 return true;
788
789 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
Monk Liu1da2c322016-11-11 11:24:29 +0800790 if (fw_ver < 0x00160e00)
791 return true;
Monk Liubec86372016-09-14 19:38:08 +0800792 }
Monk Liubec86372016-09-14 19:38:08 +0800793 }
Jim Quc836fec2017-02-10 15:59:59 +0800794 return amdgpu_need_post(adev);
Monk Liubec86372016-09-14 19:38:08 +0800795}
796
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400797/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400798 * amdgpu_dummy_page_init - init dummy page used by the driver
799 *
800 * @adev: amdgpu_device pointer
801 *
802 * Allocate the dummy page used by the driver (all asics).
803 * This dummy page is used by the driver as a filler for gart entries
804 * when pages are taken out of the GART
805 * Returns 0 on success, -ENOMEM on failure.
806 */
807int amdgpu_dummy_page_init(struct amdgpu_device *adev)
808{
809 if (adev->dummy_page.page)
810 return 0;
811 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
812 if (adev->dummy_page.page == NULL)
813 return -ENOMEM;
814 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
815 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
816 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
817 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
818 __free_page(adev->dummy_page.page);
819 adev->dummy_page.page = NULL;
820 return -ENOMEM;
821 }
822 return 0;
823}
824
825/**
826 * amdgpu_dummy_page_fini - free dummy page used by the driver
827 *
828 * @adev: amdgpu_device pointer
829 *
830 * Frees the dummy page used by the driver (all asics).
831 */
832void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
833{
834 if (adev->dummy_page.page == NULL)
835 return;
836 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
837 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
838 __free_page(adev->dummy_page.page);
839 adev->dummy_page.page = NULL;
840}
841
842
843/* ATOM accessor methods */
844/*
845 * ATOM is an interpreted byte code stored in tables in the vbios. The
846 * driver registers callbacks to access registers and the interpreter
847 * in the driver parses the tables and executes them to program specific
848 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
849 * atombios.h, and atom.c
850 */
851
852/**
853 * cail_pll_read - read PLL register
854 *
855 * @info: atom card_info pointer
856 * @reg: PLL register offset
857 *
858 * Provides a PLL register accessor for the atom interpreter (r4xx+).
859 * Returns the value of the PLL register.
860 */
861static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
862{
863 return 0;
864}
865
866/**
867 * cail_pll_write - write PLL register
868 *
869 * @info: atom card_info pointer
870 * @reg: PLL register offset
871 * @val: value to write to the pll register
872 *
873 * Provides a PLL register accessor for the atom interpreter (r4xx+).
874 */
875static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
876{
877
878}
879
880/**
881 * cail_mc_read - read MC (Memory Controller) register
882 *
883 * @info: atom card_info pointer
884 * @reg: MC register offset
885 *
886 * Provides an MC register accessor for the atom interpreter (r4xx+).
887 * Returns the value of the MC register.
888 */
889static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
890{
891 return 0;
892}
893
894/**
895 * cail_mc_write - write MC (Memory Controller) register
896 *
897 * @info: atom card_info pointer
898 * @reg: MC register offset
899 * @val: value to write to the MC register
900 *
901 * Provides an MC register accessor for the atom interpreter (r4xx+).
902 */
903static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
904{
905
906}
907
908/**
909 * cail_reg_write - write MMIO register
910 *
911 * @info: atom card_info pointer
912 * @reg: MMIO register offset
913 * @val: value to write to the MMIO register
914 *
915 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
916 */
917static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
918{
919 struct amdgpu_device *adev = info->dev->dev_private;
920
921 WREG32(reg, val);
922}
923
924/**
925 * cail_reg_read - read MMIO register
926 *
927 * @info: atom card_info pointer
928 * @reg: MMIO register offset
929 *
930 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
931 * Returns the value of the MMIO register.
932 */
933static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
934{
935 struct amdgpu_device *adev = info->dev->dev_private;
936 uint32_t r;
937
938 r = RREG32(reg);
939 return r;
940}
941
942/**
943 * cail_ioreg_write - write IO register
944 *
945 * @info: atom card_info pointer
946 * @reg: IO register offset
947 * @val: value to write to the IO register
948 *
949 * Provides an IO register accessor for the atom interpreter (r4xx+).
950 */
951static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
952{
953 struct amdgpu_device *adev = info->dev->dev_private;
954
955 WREG32_IO(reg, val);
956}
957
958/**
959 * cail_ioreg_read - read IO register
960 *
961 * @info: atom card_info pointer
962 * @reg: IO register offset
963 *
964 * Provides an IO register accessor for the atom interpreter (r4xx+).
965 * Returns the value of the IO register.
966 */
967static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
968{
969 struct amdgpu_device *adev = info->dev->dev_private;
970 uint32_t r;
971
972 r = RREG32_IO(reg);
973 return r;
974}
975
Kent Russell5b41d942017-08-22 12:31:43 -0400976static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
977 struct device_attribute *attr,
978 char *buf)
979{
980 struct drm_device *ddev = dev_get_drvdata(dev);
981 struct amdgpu_device *adev = ddev->dev_private;
982 struct atom_context *ctx = adev->mode_info.atom_context;
983
984 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
985}
986
987static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
988 NULL);
989
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400990/**
991 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
992 *
993 * @adev: amdgpu_device pointer
994 *
995 * Frees the driver info and register access callbacks for the ATOM
996 * interpreter (r4xx+).
997 * Called at driver shutdown.
998 */
999static void amdgpu_atombios_fini(struct amdgpu_device *adev)
1000{
Monk Liu89e0ec9f2016-05-27 19:34:11 +08001001 if (adev->mode_info.atom_context) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001002 kfree(adev->mode_info.atom_context->scratch);
Monk Liu89e0ec9f2016-05-27 19:34:11 +08001003 kfree(adev->mode_info.atom_context->iio);
1004 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001005 kfree(adev->mode_info.atom_context);
1006 adev->mode_info.atom_context = NULL;
1007 kfree(adev->mode_info.atom_card_info);
1008 adev->mode_info.atom_card_info = NULL;
Kent Russell5b41d942017-08-22 12:31:43 -04001009 device_remove_file(adev->dev, &dev_attr_vbios_version);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001010}
1011
1012/**
1013 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1014 *
1015 * @adev: amdgpu_device pointer
1016 *
1017 * Initializes the driver info and register access callbacks for the
1018 * ATOM interpreter (r4xx+).
1019 * Returns 0 on success, -ENOMEM on failure.
1020 * Called at driver startup.
1021 */
1022static int amdgpu_atombios_init(struct amdgpu_device *adev)
1023{
1024 struct card_info *atom_card_info =
1025 kzalloc(sizeof(struct card_info), GFP_KERNEL);
Kent Russell5b41d942017-08-22 12:31:43 -04001026 int ret;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001027
1028 if (!atom_card_info)
1029 return -ENOMEM;
1030
1031 adev->mode_info.atom_card_info = atom_card_info;
1032 atom_card_info->dev = adev->ddev;
1033 atom_card_info->reg_read = cail_reg_read;
1034 atom_card_info->reg_write = cail_reg_write;
1035 /* needed for iio ops */
1036 if (adev->rio_mem) {
1037 atom_card_info->ioreg_read = cail_ioreg_read;
1038 atom_card_info->ioreg_write = cail_ioreg_write;
1039 } else {
Amber Linb64a18c2017-01-04 08:06:58 -05001040 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001041 atom_card_info->ioreg_read = cail_reg_read;
1042 atom_card_info->ioreg_write = cail_reg_write;
1043 }
1044 atom_card_info->mc_read = cail_mc_read;
1045 atom_card_info->mc_write = cail_mc_write;
1046 atom_card_info->pll_read = cail_pll_read;
1047 atom_card_info->pll_write = cail_pll_write;
1048
1049 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1050 if (!adev->mode_info.atom_context) {
1051 amdgpu_atombios_fini(adev);
1052 return -ENOMEM;
1053 }
1054
1055 mutex_init(&adev->mode_info.atom_context->mutex);
Alex Deuchera5bde2f2016-09-23 16:23:41 -04001056 if (adev->is_atom_fw) {
1057 amdgpu_atomfirmware_scratch_regs_init(adev);
1058 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1059 } else {
1060 amdgpu_atombios_scratch_regs_init(adev);
1061 amdgpu_atombios_allocate_fb_scratch(adev);
1062 }
Kent Russell5b41d942017-08-22 12:31:43 -04001063
1064 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1065 if (ret) {
1066 DRM_ERROR("Failed to create device file for VBIOS version\n");
1067 return ret;
1068 }
1069
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001070 return 0;
1071}
1072
1073/* if we get transitioned to only one device, take VGA back */
1074/**
1075 * amdgpu_vga_set_decode - enable/disable vga decode
1076 *
1077 * @cookie: amdgpu_device pointer
1078 * @state: enable/disable vga decode
1079 *
1080 * Enable/disable vga decode (all asics).
1081 * Returns VGA resource flags.
1082 */
1083static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1084{
1085 struct amdgpu_device *adev = cookie;
1086 amdgpu_asic_set_vga_state(adev, state);
1087 if (state)
1088 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1089 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1090 else
1091 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1092}
1093
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001094static void amdgpu_check_block_size(struct amdgpu_device *adev)
Chunming Zhoua1adf8b2017-03-27 11:36:57 +08001095{
1096 /* defines number of bits in page table versus page directory,
1097 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1098 * page table and the remaining bits are in the page directory */
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001099 if (amdgpu_vm_block_size == -1)
1100 return;
Chunming Zhoua1adf8b2017-03-27 11:36:57 +08001101
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001102 if (amdgpu_vm_block_size < 9) {
Chunming Zhoua1adf8b2017-03-27 11:36:57 +08001103 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1104 amdgpu_vm_block_size);
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001105 goto def_value;
Chunming Zhoua1adf8b2017-03-27 11:36:57 +08001106 }
1107
1108 if (amdgpu_vm_block_size > 24 ||
1109 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1110 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1111 amdgpu_vm_block_size);
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001112 goto def_value;
Chunming Zhoua1adf8b2017-03-27 11:36:57 +08001113 }
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001114
1115 return;
1116
1117def_value:
1118 amdgpu_vm_block_size = -1;
Chunming Zhoua1adf8b2017-03-27 11:36:57 +08001119}
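
/*
 * Illustrative sketch (not part of the driver): with 4KB pages (a 12-bit
 * page offset) and amdgpu_vm_block_size = 9, a single page table spans
 * 2^(9 + 12) bytes = 2MB of GPU virtual address space, and the remaining
 * address bits index the page directory.  Values below 9, above 24, or
 * inconsistent with amdgpu_vm_size fall back to the default (-1) above.
 */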
1120
Zhang, Jerry83ca1452017-03-29 16:08:31 +08001121static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1122{
Alex Deucher64dab072017-06-15 18:20:09 -04001123 /* no need to check the default value */
1124 if (amdgpu_vm_size == -1)
1125 return;
1126
Alex Deucher76117502017-06-21 12:31:41 -04001127 if (!is_power_of_2(amdgpu_vm_size)) {
Zhang, Jerry83ca1452017-03-29 16:08:31 +08001128 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1129 amdgpu_vm_size);
1130 goto def_value;
1131 }
1132
1133 if (amdgpu_vm_size < 1) {
1134 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1135 amdgpu_vm_size);
1136 goto def_value;
1137 }
1138
1139 /*
1140 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
1141 */
1142 if (amdgpu_vm_size > 1024) {
1143 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1144 amdgpu_vm_size);
1145 goto def_value;
1146 }
1147
1148 return;
1149
1150def_value:
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001151 amdgpu_vm_size = -1;
Zhang, Jerry83ca1452017-03-29 16:08:31 +08001152}
1153
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001154/**
1155 * amdgpu_check_arguments - validate module params
1156 *
1157 * @adev: amdgpu_device pointer
1158 *
1159 * Validates certain module parameters and updates
1160 * the associated values used by the driver (all asics).
1161 */
1162static void amdgpu_check_arguments(struct amdgpu_device *adev)
1163{
Chunming Zhou5b011232015-12-10 17:34:33 +08001164 if (amdgpu_sched_jobs < 4) {
1165 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1166 amdgpu_sched_jobs);
1167 amdgpu_sched_jobs = 4;
Alex Deucher76117502017-06-21 12:31:41 -04001168 } else if (!is_power_of_2(amdgpu_sched_jobs)){
Chunming Zhou5b011232015-12-10 17:34:33 +08001169 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1170 amdgpu_sched_jobs);
1171 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1172 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001173
Alex Deucher83e74db2017-08-21 11:58:25 -04001174 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
Christian Königf9321cc2017-07-07 13:44:05 +02001175 /* gart size must be greater or equal to 32M */
1176 dev_warn(adev->dev, "gart size (%d) too small\n",
1177 amdgpu_gart_size);
Alex Deucher83e74db2017-08-21 11:58:25 -04001178 amdgpu_gart_size = -1;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001179 }
1180
Christian König36d38372017-07-07 13:17:45 +02001181 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001182 /* gtt size must be greater or equal to 32M */
Christian König36d38372017-07-07 13:17:45 +02001183 dev_warn(adev->dev, "gtt size (%d) too small\n",
1184 amdgpu_gtt_size);
1185 amdgpu_gtt_size = -1;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001186 }
1187
Roger Hed07f14b2017-08-15 16:05:59 +08001188 /* valid range is between 4 and 9 inclusive */
1189 if (amdgpu_vm_fragment_size != -1 &&
1190 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1191 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1192 amdgpu_vm_fragment_size = -1;
1193 }
1194
Zhang, Jerry83ca1452017-03-29 16:08:31 +08001195 amdgpu_check_vm_size(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001196
Junwei Zhangbab4fee2017-04-05 13:54:56 +08001197 amdgpu_check_block_size(adev);
Christian König6a7f76e2016-08-24 15:51:49 +02001198
jimqu526bae32016-11-07 09:53:10 +08001199 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
Alex Deucher76117502017-06-21 12:31:41 -04001200 !is_power_of_2(amdgpu_vram_page_split))) {
Christian König6a7f76e2016-08-24 15:51:49 +02001201 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1202 amdgpu_vram_page_split);
1203 amdgpu_vram_page_split = 1024;
1204 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001205}
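
/*
 * Illustrative sketch (not part of the driver): the values validated above
 * come from the module parameters declared in amdgpu_drv.c, so a test boot
 * exercising these checks might (as an assumed example) use something like:
 *
 *	modprobe amdgpu vm_size=64 vm_block_size=9 sched_jobs=32
 *
 * Out-of-range values are not fatal; they are warned about and reset to a
 * sane default or to -1 (auto) as shown above.
 */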
1206
1207/**
1208 * amdgpu_switcheroo_set_state - set switcheroo state
1209 *
1210 * @pdev: pci dev pointer
Lukas Wunner16944672015-09-05 11:17:35 +02001211 * @state: vga_switcheroo state
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001212 *
1213 * Callback for the switcheroo driver. Suspends or resumes the
1214 * asics before or after they are powered up using ACPI methods.
1215 */
1216static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1217{
1218 struct drm_device *dev = pci_get_drvdata(pdev);
1219
1220 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1221 return;
1222
1223 if (state == VGA_SWITCHEROO_ON) {
Joe Perches7ca85292017-02-28 04:55:52 -08001224 pr_info("amdgpu: switched on\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001225 /* don't suspend or resume card normally */
1226 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1227
Alex Deucher810ddc32016-08-23 13:25:49 -04001228 amdgpu_device_resume(dev, true, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001229
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001230 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1231 drm_kms_helper_poll_enable(dev);
1232 } else {
Joe Perches7ca85292017-02-28 04:55:52 -08001233 pr_info("amdgpu: switched off\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001234 drm_kms_helper_poll_disable(dev);
1235 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
Alex Deucher810ddc32016-08-23 13:25:49 -04001236 amdgpu_device_suspend(dev, true, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001237 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1238 }
1239}
1240
1241/**
1242 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1243 *
1244 * @pdev: pci dev pointer
1245 *
1246 * Callback for the switcheroo driver. Check if the switcheroo
1247 * state can be changed.
1248 * Returns true if the state can be changed, false if not.
1249 */
1250static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1251{
1252 struct drm_device *dev = pci_get_drvdata(pdev);
1253
1254 /*
1255 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1256 * locking inversion with the driver load path. And the access here is
1257 * completely racy anyway. So don't bother with locking for now.
1258 */
1259 return dev->open_count == 0;
1260}
1261
1262static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1263 .set_gpu_state = amdgpu_switcheroo_set_state,
1264 .reprobe = NULL,
1265 .can_switch = amdgpu_switcheroo_can_switch,
1266};
1267
1268int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001269 enum amd_ip_block_type block_type,
1270 enum amd_clockgating_state state)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001271{
1272 int i, r = 0;
1273
1274 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001275 if (!adev->ip_blocks[i].status.valid)
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001276 continue;
Rex Zhuc7228652017-02-22 15:33:46 +08001277 if (adev->ip_blocks[i].version->type != block_type)
1278 continue;
1279 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1280 continue;
1281 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1282 (void *)adev, state);
1283 if (r)
1284 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1285 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001286 }
1287 return r;
1288}
1289
1290int amdgpu_set_powergating_state(struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001291 enum amd_ip_block_type block_type,
1292 enum amd_powergating_state state)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001293{
1294 int i, r = 0;
1295
1296 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001297 if (!adev->ip_blocks[i].status.valid)
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001298 continue;
Rex Zhuc7228652017-02-22 15:33:46 +08001299 if (adev->ip_blocks[i].version->type != block_type)
1300 continue;
1301 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1302 continue;
1303 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1304 (void *)adev, state);
1305 if (r)
1306 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1307 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001308 }
1309 return r;
1310}
1311
Huang Rui6cb2d4e2017-01-05 18:44:41 +08001312void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1313{
1314 int i;
1315
1316 for (i = 0; i < adev->num_ip_blocks; i++) {
1317 if (!adev->ip_blocks[i].status.valid)
1318 continue;
1319 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1320 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1321 }
1322}
1323
Alex Deucher5dbbb602016-06-23 11:41:04 -04001324int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1325 enum amd_ip_block_type block_type)
1326{
1327 int i, r;
1328
1329 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001330 if (!adev->ip_blocks[i].status.valid)
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001331 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001332 if (adev->ip_blocks[i].version->type == block_type) {
1333 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
Alex Deucher5dbbb602016-06-23 11:41:04 -04001334 if (r)
1335 return r;
1336 break;
1337 }
1338 }
1339 return 0;
1340
1341}
1342
1343bool amdgpu_is_idle(struct amdgpu_device *adev,
1344 enum amd_ip_block_type block_type)
1345{
1346 int i;
1347
1348 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001349 if (!adev->ip_blocks[i].status.valid)
Alex Deucher9ecbe7f2016-06-23 11:53:12 -04001350 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001351 if (adev->ip_blocks[i].version->type == block_type)
1352 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
Alex Deucher5dbbb602016-06-23 11:41:04 -04001353 }
1354 return true;
1355
1356}
1357
Alex Deuchera1255102016-10-13 17:41:13 -04001358struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1359 enum amd_ip_block_type type)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001360{
1361 int i;
1362
1363 for (i = 0; i < adev->num_ip_blocks; i++)
Alex Deuchera1255102016-10-13 17:41:13 -04001364 if (adev->ip_blocks[i].version->type == type)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001365 return &adev->ip_blocks[i];
1366
1367 return NULL;
1368}
1369
1370/**
1371 * amdgpu_ip_block_version_cmp
1372 *
1373 * @adev: amdgpu_device pointer
yanyang15fc3aee2015-05-22 14:39:35 -04001374 * @type: enum amd_ip_block_type
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001375 * @major: major version
1376 * @minor: minor version
1377 *
1378 * return 0 if equal or greater
1379 * return 1 if smaller or the ip_block doesn't exist
1380 */
1381int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
yanyang15fc3aee2015-05-22 14:39:35 -04001382 enum amd_ip_block_type type,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001383 u32 major, u32 minor)
1384{
Alex Deuchera1255102016-10-13 17:41:13 -04001385 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001386
Alex Deuchera1255102016-10-13 17:41:13 -04001387 if (ip_block && ((ip_block->version->major > major) ||
1388 ((ip_block->version->major == major) &&
1389 (ip_block->version->minor >= minor))))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001390 return 0;
1391
1392 return 1;
1393}
1394
Alex Deuchera1255102016-10-13 17:41:13 -04001395/**
1396 * amdgpu_ip_block_add
1397 *
1398 * @adev: amdgpu_device pointer
1399 * @ip_block_version: pointer to the IP to add
1400 *
1401 * Adds the IP block driver information to the collection of IPs
1402 * on the asic.
1403 */
1404int amdgpu_ip_block_add(struct amdgpu_device *adev,
1405 const struct amdgpu_ip_block_version *ip_block_version)
1406{
1407 if (!ip_block_version)
1408 return -EINVAL;
1409
Huang Ruia0bae352017-05-03 09:52:06 +08001410 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1411 ip_block_version->funcs->name);
1412
Alex Deuchera1255102016-10-13 17:41:13 -04001413 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1414
1415 return 0;
1416}
1417
Alex Deucher483ef982016-09-30 12:43:04 -04001418static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
Emily Deng9accf2f2016-08-10 16:01:25 +08001419{
1420 adev->enable_virtual_display = false;
1421
1422 if (amdgpu_virtual_display) {
1423 struct drm_device *ddev = adev->ddev;
1424 const char *pci_address_name = pci_name(ddev->pdev);
Emily Deng0f663562016-09-30 13:02:18 -04001425 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
Emily Deng9accf2f2016-08-10 16:01:25 +08001426
1427 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1428 pciaddstr_tmp = pciaddstr;
Emily Deng0f663562016-09-30 13:02:18 -04001429 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1430 pciaddname = strsep(&pciaddname_tmp, ",");
Yintian Tao967de2a2017-01-22 15:16:51 +08001431 if (!strcmp("all", pciaddname)
1432 || !strcmp(pci_address_name, pciaddname)) {
Emily Deng0f663562016-09-30 13:02:18 -04001433 long num_crtc;
1434 int res = -1;
1435
Emily Deng9accf2f2016-08-10 16:01:25 +08001436 adev->enable_virtual_display = true;
Emily Deng0f663562016-09-30 13:02:18 -04001437
1438 if (pciaddname_tmp)
1439 res = kstrtol(pciaddname_tmp, 10,
1440 &num_crtc);
1441
1442 if (!res) {
1443 if (num_crtc < 1)
1444 num_crtc = 1;
1445 if (num_crtc > 6)
1446 num_crtc = 6;
1447 adev->mode_info.num_crtc = num_crtc;
1448 } else {
1449 adev->mode_info.num_crtc = 1;
1450 }
Emily Deng9accf2f2016-08-10 16:01:25 +08001451 break;
1452 }
1453 }
1454
Emily Deng0f663562016-09-30 13:02:18 -04001455 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1456 amdgpu_virtual_display, pci_address_name,
1457 adev->enable_virtual_display, adev->mode_info.num_crtc);
Emily Deng9accf2f2016-08-10 16:01:25 +08001458
1459 kfree(pciaddstr);
1460 }
1461}
1462
Alex Deuchere2a75f82017-04-27 16:58:01 -04001463static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1464{
Alex Deuchere2a75f82017-04-27 16:58:01 -04001465 const char *chip_name;
1466 char fw_name[30];
1467 int err;
1468 const struct gpu_info_firmware_header_v1_0 *hdr;
1469
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001470 adev->firmware.gpu_info_fw = NULL;
1471
Alex Deuchere2a75f82017-04-27 16:58:01 -04001472 switch (adev->asic_type) {
1473 case CHIP_TOPAZ:
1474 case CHIP_TONGA:
1475 case CHIP_FIJI:
1476 case CHIP_POLARIS11:
1477 case CHIP_POLARIS10:
1478 case CHIP_POLARIS12:
1479 case CHIP_CARRIZO:
1480 case CHIP_STONEY:
1481#ifdef CONFIG_DRM_AMDGPU_SI
1482 case CHIP_VERDE:
1483 case CHIP_TAHITI:
1484 case CHIP_PITCAIRN:
1485 case CHIP_OLAND:
1486 case CHIP_HAINAN:
1487#endif
1488#ifdef CONFIG_DRM_AMDGPU_CIK
1489 case CHIP_BONAIRE:
1490 case CHIP_HAWAII:
1491 case CHIP_KAVERI:
1492 case CHIP_KABINI:
1493 case CHIP_MULLINS:
1494#endif
1495 default:
1496 return 0;
1497 case CHIP_VEGA10:
1498 chip_name = "vega10";
1499 break;
Alex Deucher2d2e5e72017-05-09 12:27:35 -04001500 case CHIP_RAVEN:
1501 chip_name = "raven";
1502 break;
Alex Deuchere2a75f82017-04-27 16:58:01 -04001503 }
1504
1505 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001506 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001507 if (err) {
1508 dev_err(adev->dev,
1509 "Failed to load gpu_info firmware \"%s\"\n",
1510 fw_name);
1511 goto out;
1512 }
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001513 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001514 if (err) {
1515 dev_err(adev->dev,
1516 "Failed to validate gpu_info firmware \"%s\"\n",
1517 fw_name);
1518 goto out;
1519 }
1520
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001521 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
Alex Deuchere2a75f82017-04-27 16:58:01 -04001522 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1523
1524 switch (hdr->version_major) {
1525 case 1:
1526 {
1527 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001528 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
Alex Deuchere2a75f82017-04-27 16:58:01 -04001529 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1530
Alex Deucherb5ab16b2017-05-11 19:09:49 -04001531 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1532 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1533 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1534 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001535 adev->gfx.config.max_texture_channel_caches =
Alex Deucherb5ab16b2017-05-11 19:09:49 -04001536 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1537 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1538 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1539 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1540 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001541 adev->gfx.config.double_offchip_lds_buf =
Alex Deucherb5ab16b2017-05-11 19:09:49 -04001542 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1543 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
Hawking Zhang51fd0372017-06-09 22:30:52 +08001544 adev->gfx.cu_info.max_waves_per_simd =
1545 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1546 adev->gfx.cu_info.max_scratch_slots_per_cu =
1547 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1548 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001549 break;
1550 }
1551 default:
1552 dev_err(adev->dev,
1553 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1554 err = -EINVAL;
1555 goto out;
1556 }
1557out:
Alex Deuchere2a75f82017-04-27 16:58:01 -04001558 return err;
1559}
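/*
 * Illustrative note: for CHIP_VEGA10 the request above resolves to
 * "amdgpu/vega10_gpu_info.bin" and for CHIP_RAVEN to "amdgpu/raven_gpu_info.bin";
 * the validated table is then used to fill in the gfx config and CU info fields
 * for those ASICs.
 */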
1560
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001561static int amdgpu_early_init(struct amdgpu_device *adev)
1562{
Alex Deucheraaa36a92015-04-20 17:31:14 -04001563 int i, r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001564
Alex Deucher483ef982016-09-30 12:43:04 -04001565 amdgpu_device_enable_virtual_display(adev);
Emily Denga6be7572016-08-08 11:37:50 +08001566
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001567 switch (adev->asic_type) {
Alex Deucheraaa36a92015-04-20 17:31:14 -04001568 case CHIP_TOPAZ:
1569 case CHIP_TONGA:
David Zhang48299f92015-07-08 01:05:16 +08001570 case CHIP_FIJI:
Flora Cui2cc0c0b2016-03-14 18:33:29 -04001571 case CHIP_POLARIS11:
1572 case CHIP_POLARIS10:
Junwei Zhangc4642a42016-12-14 15:32:28 -05001573 case CHIP_POLARIS12:
Alex Deucheraaa36a92015-04-20 17:31:14 -04001574 case CHIP_CARRIZO:
Samuel Li39bb0c92015-10-08 16:31:43 -04001575 case CHIP_STONEY:
1576 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001577 adev->family = AMDGPU_FAMILY_CZ;
1578 else
1579 adev->family = AMDGPU_FAMILY_VI;
1580
1581 r = vi_set_ip_blocks(adev);
1582 if (r)
1583 return r;
1584 break;
Ken Wang33f34802016-01-21 17:29:41 +08001585#ifdef CONFIG_DRM_AMDGPU_SI
1586 case CHIP_VERDE:
1587 case CHIP_TAHITI:
1588 case CHIP_PITCAIRN:
1589 case CHIP_OLAND:
1590 case CHIP_HAINAN:
Ken Wang295d0da2016-05-24 21:02:53 +08001591 adev->family = AMDGPU_FAMILY_SI;
Ken Wang33f34802016-01-21 17:29:41 +08001592 r = si_set_ip_blocks(adev);
1593 if (r)
1594 return r;
1595 break;
1596#endif
Alex Deuchera2e73f52015-04-20 17:09:27 -04001597#ifdef CONFIG_DRM_AMDGPU_CIK
1598 case CHIP_BONAIRE:
1599 case CHIP_HAWAII:
1600 case CHIP_KAVERI:
1601 case CHIP_KABINI:
1602 case CHIP_MULLINS:
1603 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1604 adev->family = AMDGPU_FAMILY_CI;
1605 else
1606 adev->family = AMDGPU_FAMILY_KV;
1607
1608 r = cik_set_ip_blocks(adev);
1609 if (r)
1610 return r;
1611 break;
1612#endif
Chunming Zhou2ca8a5d2016-12-07 17:31:19 +08001613 case CHIP_VEGA10:
1614 case CHIP_RAVEN:
1615 if (adev->asic_type == CHIP_RAVEN)
1616 adev->family = AMDGPU_FAMILY_RV;
1617 else
1618 adev->family = AMDGPU_FAMILY_AI;
Ken Wang460826e2017-03-06 14:53:16 -05001619
1620 r = soc15_set_ip_blocks(adev);
1621 if (r)
1622 return r;
1623 break;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001624 default:
1625 /* FIXME: not supported yet */
1626 return -EINVAL;
1627 }
1628
Alex Deuchere2a75f82017-04-27 16:58:01 -04001629 r = amdgpu_device_parse_gpu_info_fw(adev);
1630 if (r)
1631 return r;
1632
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001633 if (amdgpu_sriov_vf(adev)) {
1634 r = amdgpu_virt_request_full_gpu(adev, true);
1635 if (r)
1636 return r;
1637 }
1638
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001639 for (i = 0; i < adev->num_ip_blocks; i++) {
1640 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
Huang Ruied8cf002017-05-03 09:40:17 +08001641 DRM_ERROR("disabled ip block: %d <%s>\n",
1642 i, adev->ip_blocks[i].version->funcs->name);
Alex Deuchera1255102016-10-13 17:41:13 -04001643 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001644 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001645 if (adev->ip_blocks[i].version->funcs->early_init) {
1646 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001647 if (r == -ENOENT) {
Alex Deuchera1255102016-10-13 17:41:13 -04001648 adev->ip_blocks[i].status.valid = false;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001649 } else if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001650 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1651 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001652 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001653 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001654 adev->ip_blocks[i].status.valid = true;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001655 }
Alex Deucher974e6b62015-07-10 13:59:44 -04001656 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001657 adev->ip_blocks[i].status.valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001658 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001659 }
1660 }
1661
Nicolai Hähnle395d1fb2016-06-02 12:32:07 +02001662 adev->cg_flags &= amdgpu_cg_mask;
1663 adev->pg_flags &= amdgpu_pg_mask;
1664
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001665 return 0;
1666}
1667
1668static int amdgpu_init(struct amdgpu_device *adev)
1669{
1670 int i, r;
1671
1672 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001673 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001674 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001675 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001676 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001677 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1678 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001679 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001680 }
Alex Deuchera1255102016-10-13 17:41:13 -04001681 adev->ip_blocks[i].status.sw = true;
Rex Zhu46967c22017-09-22 18:03:59 +08001682
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001683 /* need to do gmc hw init early so we can allocate gpu mem */
Alex Deuchera1255102016-10-13 17:41:13 -04001684 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001685 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001686 if (r) {
1687 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001688 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001689 }
Alex Deuchera1255102016-10-13 17:41:13 -04001690 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001691 if (r) {
1692 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001693 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001694 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001695 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001696 if (r) {
1697 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001698 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001699 }
Alex Deuchera1255102016-10-13 17:41:13 -04001700 adev->ip_blocks[i].status.hw = true;
Monk Liu24936642017-01-09 15:54:32 +08001701
1702 /* right after GMC hw init, we create CSA */
1703 if (amdgpu_sriov_vf(adev)) {
1704 r = amdgpu_allocate_static_csa(adev);
1705 if (r) {
1706 DRM_ERROR("allocate CSA failed %d\n", r);
1707 return r;
1708 }
1709 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001710 }
1711 }
1712
Rex Zhu46967c22017-09-22 18:03:59 +08001713 mutex_lock(&adev->firmware.mutex);
1714 if (amdgpu_ucode_init_bo(adev))
1715 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1716 mutex_unlock(&adev->firmware.mutex);
1717
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001718 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001719 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001720 continue;
1721 /* gmc hw init is done early */
Alex Deuchera1255102016-10-13 17:41:13 -04001722 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001723 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001724 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001725 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001726 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1727 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001728 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001729 }
Alex Deuchera1255102016-10-13 17:41:13 -04001730 adev->ip_blocks[i].status.hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001731 }
1732
1733 return 0;
1734}
1735
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001736static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1737{
1738 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1739}
1740
1741static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1742{
1743 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1744 AMDGPU_RESET_MAGIC_NUM);
1745}
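/*
 * Illustrative only: a rough sketch (not the actual recovery path) of how the
 * two helpers above are meant to pair up. The start of the GART table is
 * snapshotted at late init and compared again after an ASIC reset; a mismatch
 * means VRAM contents were lost and buffers have to be restored:
 *
 *	amdgpu_fill_reset_magic(adev);		// snapshot taken in late init
 *	...full ASIC reset...
 *	if (amdgpu_check_vram_lost(adev))	// snapshot no longer matches
 *		...restore VRAM buffers, e.g. from their shadow copies...
 */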
1746
Shirish S2dc80b02017-05-25 10:05:25 +05301747static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1748{
1749 int i = 0, r;
1750
1751 for (i = 0; i < adev->num_ip_blocks; i++) {
1752 if (!adev->ip_blocks[i].status.valid)
1753 continue;
1754 /* skip CG for VCE/UVD, it's handled specially */
1755 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1756 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1757 /* enable clockgating to save power */
1758 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1759 AMD_CG_STATE_GATE);
1760 if (r) {
1761 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1762 adev->ip_blocks[i].version->funcs->name, r);
1763 return r;
1764 }
1765 }
1766 }
1767 return 0;
1768}
1769
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001770static int amdgpu_late_init(struct amdgpu_device *adev)
1771{
1772 int i = 0, r;
1773
1774 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001775 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001776 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001777 if (adev->ip_blocks[i].version->funcs->late_init) {
1778 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001779 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001780 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1781 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001782 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001783 }
Alex Deuchera1255102016-10-13 17:41:13 -04001784 adev->ip_blocks[i].status.late_initialized = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001785 }
1786 }
1787
Shirish S2dc80b02017-05-25 10:05:25 +05301788 mod_delayed_work(system_wq, &adev->late_init_work,
1789 msecs_to_jiffies(AMDGPU_RESUME_MS));
1790
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001791 amdgpu_fill_reset_magic(adev);
1792
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001793 return 0;
1794}
1795
1796static int amdgpu_fini(struct amdgpu_device *adev)
1797{
1798 int i, r;
1799
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001800 /* need to disable SMC first */
1801 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001802 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001803 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001804 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001805 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001806 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1807 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001808 if (r) {
1809 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001810 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001811 return r;
1812 }
Alex Deuchera1255102016-10-13 17:41:13 -04001813 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001814 /* XXX handle errors */
1815 if (r) {
1816 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001817 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001818 }
Alex Deuchera1255102016-10-13 17:41:13 -04001819 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001820 break;
1821 }
1822 }
1823
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001824 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001825 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001826 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001827 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001828 amdgpu_wb_fini(adev);
1829 amdgpu_vram_scratch_fini(adev);
1830 }
Rex Zhu8201a672016-11-24 21:44:44 +08001831
1832 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1833 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1834 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1835 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1836 AMD_CG_STATE_UNGATE);
1837 if (r) {
1838 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1839 adev->ip_blocks[i].version->funcs->name, r);
1840 return r;
1841 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001842 }
Rex Zhu8201a672016-11-24 21:44:44 +08001843
Alex Deuchera1255102016-10-13 17:41:13 -04001844 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001845 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001846 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001847 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1848 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001849 }
Rex Zhu8201a672016-11-24 21:44:44 +08001850
Alex Deuchera1255102016-10-13 17:41:13 -04001851 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001852 }
Rex Zhu46967c22017-09-22 18:03:59 +08001853 if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
1854 amdgpu_ucode_fini_bo(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001855
1856 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001857 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001858 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001859 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001860 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001861 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001862 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1863 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001864 }
Alex Deuchera1255102016-10-13 17:41:13 -04001865 adev->ip_blocks[i].status.sw = false;
1866 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001867 }
1868
Monk Liua6dcfd92016-05-19 14:36:34 +08001869 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001870 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001871 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001872 if (adev->ip_blocks[i].version->funcs->late_fini)
1873 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1874 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001875 }
1876
Monk Liu030308f2017-09-15 15:34:52 +08001877 if (amdgpu_sriov_vf(adev))
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001878 amdgpu_virt_release_full_gpu(adev, false);
Monk Liu24936642017-01-09 15:54:32 +08001879
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001880 return 0;
1881}
1882
Shirish S2dc80b02017-05-25 10:05:25 +05301883static void amdgpu_late_init_func_handler(struct work_struct *work)
1884{
1885 struct amdgpu_device *adev =
1886 container_of(work, struct amdgpu_device, late_init_work.work);
1887 amdgpu_late_set_cg_state(adev);
1888}
1889
Alex Deucherfaefba92016-12-06 10:38:29 -05001890int amdgpu_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001891{
1892 int i, r;
1893
Xiangliang Yue941ea92017-01-18 12:47:55 +08001894 if (amdgpu_sriov_vf(adev))
1895 amdgpu_virt_request_full_gpu(adev, false);
1896
Flora Cuic5a93a22016-02-26 10:45:25 +08001897 /* ungate SMC block first */
1898 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1899 AMD_CG_STATE_UNGATE);
1900 if (r) {
 1901		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1902 }
1903
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001904 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001905 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001906 continue;
1907 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001908		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
Alex Deuchera1255102016-10-13 17:41:13 -04001909 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1910 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001911 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001912 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1913 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001914 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001915 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001916 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001917 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001918 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001919 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001920 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1921 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001922 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001923 }
1924
Xiangliang Yue941ea92017-01-18 12:47:55 +08001925 if (amdgpu_sriov_vf(adev))
1926 amdgpu_virt_release_full_gpu(adev, false);
1927
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001928 return 0;
1929}
1930
Monk Liue4f0fdc2017-02-09 11:55:49 +08001931static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001932{
1933 int i, r;
1934
Monk Liu2cb681b2017-04-26 12:00:49 +08001935 static enum amd_ip_block_type ip_order[] = {
1936 AMD_IP_BLOCK_TYPE_GMC,
1937 AMD_IP_BLOCK_TYPE_COMMON,
Monk Liu2cb681b2017-04-26 12:00:49 +08001938 AMD_IP_BLOCK_TYPE_IH,
1939 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001940
Monk Liu2cb681b2017-04-26 12:00:49 +08001941 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1942 int j;
1943 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001944
Monk Liu2cb681b2017-04-26 12:00:49 +08001945 for (j = 0; j < adev->num_ip_blocks; j++) {
1946 block = &adev->ip_blocks[j];
1947
1948 if (block->version->type != ip_order[i] ||
1949 !block->status.valid)
1950 continue;
1951
1952 r = block->version->funcs->hw_init(adev);
 1953			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001954 }
1955 }
1956
1957 return 0;
1958}
1959
Monk Liue4f0fdc2017-02-09 11:55:49 +08001960static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001961{
1962 int i, r;
1963
Monk Liu2cb681b2017-04-26 12:00:49 +08001964 static enum amd_ip_block_type ip_order[] = {
1965 AMD_IP_BLOCK_TYPE_SMC,
1966 AMD_IP_BLOCK_TYPE_DCE,
1967 AMD_IP_BLOCK_TYPE_GFX,
1968 AMD_IP_BLOCK_TYPE_SDMA,
Frank Min257deb82017-06-15 20:07:36 +08001969 AMD_IP_BLOCK_TYPE_UVD,
1970 AMD_IP_BLOCK_TYPE_VCE
Monk Liu2cb681b2017-04-26 12:00:49 +08001971 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001972
Monk Liu2cb681b2017-04-26 12:00:49 +08001973 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1974 int j;
1975 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001976
Monk Liu2cb681b2017-04-26 12:00:49 +08001977 for (j = 0; j < adev->num_ip_blocks; j++) {
1978 block = &adev->ip_blocks[j];
1979
1980 if (block->version->type != ip_order[i] ||
1981 !block->status.valid)
1982 continue;
1983
1984 r = block->version->funcs->hw_init(adev);
 1985			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001986 }
1987 }
1988
1989 return 0;
1990}
1991
Chunming Zhoufcf06492017-05-05 10:33:33 +08001992static int amdgpu_resume_phase1(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001993{
1994 int i, r;
1995
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001996 for (i = 0; i < adev->num_ip_blocks; i++) {
1997 if (!adev->ip_blocks[i].status.valid)
1998 continue;
Chunming Zhoufcf06492017-05-05 10:33:33 +08001999 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2000 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2001 adev->ip_blocks[i].version->type ==
2002 AMD_IP_BLOCK_TYPE_IH) {
2003 r = adev->ip_blocks[i].version->funcs->resume(adev);
2004 if (r) {
2005 DRM_ERROR("resume of IP block <%s> failed %d\n",
2006 adev->ip_blocks[i].version->funcs->name, r);
2007 return r;
2008 }
2009 }
2010 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002011
Chunming Zhoufcf06492017-05-05 10:33:33 +08002012 return 0;
2013}
2014
2015static int amdgpu_resume_phase2(struct amdgpu_device *adev)
2016{
2017 int i, r;
2018
2019 for (i = 0; i < adev->num_ip_blocks; i++) {
2020 if (!adev->ip_blocks[i].status.valid)
2021 continue;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002022 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2023 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
 2024		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2025 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002026 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002027 if (r) {
2028 DRM_ERROR("resume of IP block <%s> failed %d\n",
2029 adev->ip_blocks[i].version->funcs->name, r);
2030 return r;
2031 }
2032 }
2033
2034 return 0;
2035}
2036
2037static int amdgpu_resume(struct amdgpu_device *adev)
2038{
Chunming Zhoufcf06492017-05-05 10:33:33 +08002039 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002040
Chunming Zhoufcf06492017-05-05 10:33:33 +08002041 r = amdgpu_resume_phase1(adev);
2042 if (r)
2043 return r;
2044 r = amdgpu_resume_phase2(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002045
Chunming Zhoufcf06492017-05-05 10:33:33 +08002046 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002047}
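/*
 * Illustrative note (an assumption, not from the original source): the
 * two-phase split above resumes COMMON, GMC and IH first, presumably so that
 * register access, the GART/VM backing store and interrupts are available
 * again before the remaining IP blocks are resumed in phase 2.
 */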
2048
Monk Liu4e99a442016-03-31 13:26:59 +08002049static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04002050{
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002051 if (adev->is_atom_fw) {
2052 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2053 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2054 } else {
2055 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2056 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2057 }
Andres Rodriguez048765a2016-06-11 02:51:32 -04002058}
2059
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002060/**
2061 * amdgpu_device_init - initialize the driver
2062 *
2063 * @adev: amdgpu_device pointer
 2064 * @ddev: drm dev pointer
2065 * @pdev: pci dev pointer
2066 * @flags: driver flags
2067 *
2068 * Initializes the driver info and hw (all asics).
2069 * Returns 0 for success or an error on failure.
2070 * Called at driver startup.
2071 */
2072int amdgpu_device_init(struct amdgpu_device *adev,
2073 struct drm_device *ddev,
2074 struct pci_dev *pdev,
2075 uint32_t flags)
2076{
2077 int r, i;
2078 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02002079 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002080
2081 adev->shutdown = false;
2082 adev->dev = &pdev->dev;
2083 adev->ddev = ddev;
2084 adev->pdev = pdev;
2085 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08002086 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002087 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
Christian König6f02a692017-07-07 11:56:59 +02002088 adev->mc.gart_size = 512 * 1024 * 1024;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002089 adev->accel_working = false;
2090 adev->num_rings = 0;
2091 adev->mman.buffer_funcs = NULL;
2092 adev->mman.buffer_funcs_ring = NULL;
2093 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01002094 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002095 adev->gart.gart_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002096 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002097
2098 adev->smc_rreg = &amdgpu_invalid_rreg;
2099 adev->smc_wreg = &amdgpu_invalid_wreg;
2100 adev->pcie_rreg = &amdgpu_invalid_rreg;
2101 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08002102 adev->pciep_rreg = &amdgpu_invalid_rreg;
2103 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002104 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2105 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2106 adev->didt_rreg = &amdgpu_invalid_rreg;
2107 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08002108 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2109 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002110 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2111 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2112
Rex Zhuccdbb202016-06-08 12:47:41 +08002113
Alex Deucher3e39ab92015-06-05 15:04:33 -04002114 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2115 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2116 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002117
 2118	/* mutex initializations are all done here so we
 2119	 * can call these functions again without running into locking issues */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002120 atomic_set(&adev->irq.ih.lock, 0);
Huang Rui0e5ca0d2017-03-03 18:37:23 -05002121 mutex_init(&adev->firmware.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002122 mutex_init(&adev->pm.mutex);
2123 mutex_init(&adev->gfx.gpu_clock_mutex);
2124 mutex_init(&adev->srbm_mutex);
2125 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002126 mutex_init(&adev->mn_lock);
Alex Deuchere23b74a2017-09-28 09:47:32 -04002127 mutex_init(&adev->virt.vf_errors.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002128 hash_init(adev->mn_hash);
2129
2130 amdgpu_check_arguments(adev);
2131
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002132 spin_lock_init(&adev->mmio_idx_lock);
2133 spin_lock_init(&adev->smc_idx_lock);
2134 spin_lock_init(&adev->pcie_idx_lock);
2135 spin_lock_init(&adev->uvd_ctx_idx_lock);
2136 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08002137 spin_lock_init(&adev->gc_cac_idx_lock);
Evan Quan16abb5d2017-07-04 09:21:50 +08002138 spin_lock_init(&adev->se_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002139 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02002140 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002141
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08002142 INIT_LIST_HEAD(&adev->shadow_list);
2143 mutex_init(&adev->shadow_list_lock);
2144
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002145 INIT_LIST_HEAD(&adev->gtt_list);
2146 spin_lock_init(&adev->gtt_list_lock);
2147
Andres Rodriguez795f2812017-03-06 16:27:55 -05002148 INIT_LIST_HEAD(&adev->ring_lru_list);
2149 spin_lock_init(&adev->ring_lru_list_lock);
2150
Shirish S2dc80b02017-05-25 10:05:25 +05302151 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2152
Alex Xie0fa49552017-06-08 14:58:05 -04002153 /* Registers mapping */
2154 /* TODO: block userspace mapping of io register */
Ken Wangda69c1612016-01-21 19:08:55 +08002155 if (adev->asic_type >= CHIP_BONAIRE) {
2156 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2157 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2158 } else {
2159 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2160 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2161 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002162
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002163 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2164 if (adev->rmmio == NULL) {
2165 return -ENOMEM;
2166 }
2167 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2168 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2169
Christian König705e5192017-06-08 11:15:16 +02002170 /* doorbell bar mapping */
2171 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002172
2173 /* io port mapping */
2174 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2175 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2176 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2177 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2178 break;
2179 }
2180 }
2181 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05002182 DRM_INFO("PCI I/O BAR is not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002183
2184 /* early init functions */
2185 r = amdgpu_early_init(adev);
2186 if (r)
2187 return r;
2188
2189 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2190 /* this will fail for cards that aren't VGA class devices, just
2191 * ignore it */
2192 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2193
2194 if (amdgpu_runtime_pm == 1)
2195 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04002196 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002197 runtime = true;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002198 if (!pci_is_thunderbolt_attached(adev->pdev))
2199 vga_switcheroo_register_client(adev->pdev,
2200 &amdgpu_switcheroo_ops, runtime);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002201 if (runtime)
2202 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2203
2204 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04002205 if (!amdgpu_get_bios(adev)) {
2206 r = -EINVAL;
2207 goto failed;
2208 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01002209
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002210 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002211 if (r) {
2212 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002213 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002214 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002215 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002216
Monk Liu4e99a442016-03-31 13:26:59 +08002217 /* detect if we are with an SRIOV vbios */
2218 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04002219
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002220 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08002221 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002222 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08002223 dev_err(adev->dev, "no vBIOS found\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002224 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002225 r = -EINVAL;
2226 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002227 }
Monk Liubec86372016-09-14 19:38:08 +08002228 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08002229 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2230 if (r) {
2231 dev_err(adev->dev, "gpu post error!\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002232 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
Monk Liu4e99a442016-03-31 13:26:59 +08002233 goto failed;
2234 }
2235 } else {
2236 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002237 }
2238
Alex Deucher88b64e92017-07-10 10:43:10 -04002239 if (adev->is_atom_fw) {
2240 /* Initialize clocks */
2241 r = amdgpu_atomfirmware_get_clock_info(adev);
2242 if (r) {
2243 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002244 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
Alex Deucher88b64e92017-07-10 10:43:10 -04002245 goto failed;
2246 }
2247 } else {
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002248 /* Initialize clocks */
2249 r = amdgpu_atombios_get_clock_info(adev);
2250 if (r) {
2251 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002252 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
Gavin Wan89041942017-06-23 13:55:15 -04002253 goto failed;
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002254 }
2255 /* init i2c buses */
2256 amdgpu_atombios_i2c_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002257 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002258
2259 /* Fence driver */
2260 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002261 if (r) {
2262 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002263 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002264 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002265 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002266
2267 /* init the mode config */
2268 drm_mode_config_init(adev->ddev);
2269
2270 r = amdgpu_init(adev);
2271 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05002272 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002273 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002274 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002275 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002276 }
2277
2278 adev->accel_working = true;
2279
Alex Xiee59c0202017-06-01 09:42:59 -04002280 amdgpu_vm_check_compute_bug(adev);
2281
Marek Olšák95844d22016-08-17 23:49:27 +02002282 /* Initialize the buffer migration limit. */
2283 if (amdgpu_moverate >= 0)
2284 max_MBps = amdgpu_moverate;
2285 else
2286 max_MBps = 8; /* Allow 8 MB/s. */
2287 /* Get a log2 for easy divisions. */
2288 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2289
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002290 r = amdgpu_ib_pool_init(adev);
2291 if (r) {
2292 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deuchere23b74a2017-09-28 09:47:32 -04002293 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002294 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002295 }
2296
2297 r = amdgpu_ib_ring_tests(adev);
2298 if (r)
2299 DRM_ERROR("ib ring test failed (%d).\n", r);
2300
Monk Liu9bc92b92017-02-08 17:38:13 +08002301 amdgpu_fbdev_init(adev);
2302
Rex Zhud2f52ac2017-09-22 17:47:27 +08002303 r = amdgpu_pm_sysfs_init(adev);
2304 if (r)
2305 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2306
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002307 r = amdgpu_gem_debugfs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002308 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002309 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002310
2311 r = amdgpu_debugfs_regs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002312 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002313 DRM_ERROR("registering register debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002314
Huang Rui4f0955f2017-05-10 23:04:06 +08002315 r = amdgpu_debugfs_test_ib_ring_init(adev);
2316 if (r)
 2317		DRM_ERROR("registering test ib ring debugfs failed (%d).\n", r);
2318
Huang Rui50ab2532016-06-12 15:51:09 +08002319 r = amdgpu_debugfs_firmware_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002320 if (r)
Huang Rui50ab2532016-06-12 15:51:09 +08002321 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
Huang Rui50ab2532016-06-12 15:51:09 +08002322
Kent Russelldb95e212017-08-22 12:31:43 -04002323 r = amdgpu_debugfs_vbios_dump_init(adev);
2324 if (r)
2325 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2326
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002327 if ((amdgpu_testing & 1)) {
2328 if (adev->accel_working)
2329 amdgpu_test_moves(adev);
2330 else
2331 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2332 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002333 if (amdgpu_benchmarking) {
2334 if (adev->accel_working)
2335 amdgpu_benchmark(adev, amdgpu_benchmarking);
2336 else
2337 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2338 }
2339
2340 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2341 * explicit gating rather than handling it automatically.
2342 */
2343 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002344 if (r) {
2345 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002346 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002347 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002348 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002349
2350 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04002351
2352failed:
Gavin Wan89041942017-06-23 13:55:15 -04002353 amdgpu_vf_error_trans_all(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002354 if (runtime)
2355 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2356 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002357}
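/*
 * Illustrative only: a minimal sketch of how a DRM load callback is assumed to
 * use amdgpu_device_init(). The function and allocator names in this sketch
 * (example_driver_load, example_alloc_adev) are hypothetical, not part of the
 * driver.
 */
#if 0
static int example_driver_load(struct drm_device *ddev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r;

	adev = example_alloc_adev();		/* hypothetical allocator */
	if (!adev)
		return -ENOMEM;

	ddev->dev_private = adev;
	r = amdgpu_device_init(adev, ddev, ddev->pdev, flags);
	if (r)
		dev_err(&ddev->pdev->dev, "amdgpu_device_init failed (%d)\n", r);
	return r;
}
#endif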
2358
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002359/**
2360 * amdgpu_device_fini - tear down the driver
2361 *
2362 * @adev: amdgpu_device pointer
2363 *
2364 * Tear down the driver info (all asics).
2365 * Called at driver shutdown.
2366 */
2367void amdgpu_device_fini(struct amdgpu_device *adev)
2368{
2369 int r;
2370
2371 DRM_INFO("amdgpu: finishing device.\n");
2372 adev->shutdown = true;
Pixel Dingdb2c2a92017-04-25 16:47:42 +08002373 if (adev->mode_info.mode_config_initialized)
2374 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002375 /* evict vram memory */
2376 amdgpu_bo_evict_vram(adev);
2377 amdgpu_ib_pool_fini(adev);
Horace Chena05502e2017-09-29 14:41:57 +08002378 amdgpu_fw_reserve_vram_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002379 amdgpu_fence_driver_fini(adev);
2380 amdgpu_fbdev_fini(adev);
2381 r = amdgpu_fini(adev);
Huang Ruiab4fe3e2017-06-05 22:11:59 +08002382 if (adev->firmware.gpu_info_fw) {
2383 release_firmware(adev->firmware.gpu_info_fw);
2384 adev->firmware.gpu_info_fw = NULL;
2385 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002386 adev->accel_working = false;
Shirish S2dc80b02017-05-25 10:05:25 +05302387 cancel_delayed_work_sync(&adev->late_init_work);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002388 /* free i2c buses */
2389 amdgpu_i2c_fini(adev);
2390 amdgpu_atombios_fini(adev);
2391 kfree(adev->bios);
2392 adev->bios = NULL;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002393 if (!pci_is_thunderbolt_attached(adev->pdev))
2394 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002395 if (adev->flags & AMD_IS_PX)
2396 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002397 vga_client_register(adev->pdev, NULL, NULL, NULL);
2398 if (adev->rio_mem)
2399 pci_iounmap(adev->pdev, adev->rio_mem);
2400 adev->rio_mem = NULL;
2401 iounmap(adev->rmmio);
2402 adev->rmmio = NULL;
Christian König705e5192017-06-08 11:15:16 +02002403 amdgpu_doorbell_fini(adev);
Rex Zhud2f52ac2017-09-22 17:47:27 +08002404 amdgpu_pm_sysfs_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002405 amdgpu_debugfs_regs_cleanup(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002406}
2407
2408
2409/*
2410 * Suspend & resume.
2411 */
2412/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002413 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002414 *
 2415 * @dev: drm dev pointer
 2416 * @suspend: also put the PCI device into a low power state (D3hot) when true
 * @fbcon: suspend the fbdev console when true
 2417 *
2418 * Puts the hw in the suspend state (all asics).
2419 * Returns 0 for success or an error on failure.
2420 * Called at driver suspend.
2421 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002422int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002423{
2424 struct amdgpu_device *adev;
2425 struct drm_crtc *crtc;
2426 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002427 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002428
2429 if (dev == NULL || dev->dev_private == NULL) {
2430 return -ENODEV;
2431 }
2432
2433 adev = dev->dev_private;
2434
2435 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2436 return 0;
2437
2438 drm_kms_helper_poll_disable(dev);
2439
2440 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002441 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002442 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2443 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2444 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002445 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002446
Yong Zhaoba997702015-11-09 17:21:45 -05002447 amdgpu_amdkfd_suspend(adev);
2448
Alex Deucher756e6882015-10-08 00:03:36 -04002449 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002450 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04002451 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002452 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2453 struct amdgpu_bo *robj;
2454
Alex Deucher756e6882015-10-08 00:03:36 -04002455 if (amdgpu_crtc->cursor_bo) {
2456 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002457 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002458 if (r == 0) {
2459 amdgpu_bo_unpin(aobj);
2460 amdgpu_bo_unreserve(aobj);
2461 }
2462 }
2463
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002464 if (rfb == NULL || rfb->obj == NULL) {
2465 continue;
2466 }
2467 robj = gem_to_amdgpu_bo(rfb->obj);
2468 /* don't unpin kernel fb objects */
2469 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
Alex Xie7a6901d2017-04-24 13:52:41 -04002470 r = amdgpu_bo_reserve(robj, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002471 if (r == 0) {
2472 amdgpu_bo_unpin(robj);
2473 amdgpu_bo_unreserve(robj);
2474 }
2475 }
2476 }
2477 /* evict vram memory */
2478 amdgpu_bo_evict_vram(adev);
2479
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002480 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002481
2482 r = amdgpu_suspend(adev);
2483
Alex Deuchera0a71e42016-10-10 12:41:36 -04002484 /* evict remaining vram memory
2485 * This second call to evict vram is to evict the gart page table
2486 * using the CPU.
2487 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002488 amdgpu_bo_evict_vram(adev);
2489
Alex Deucherd05da0e2017-06-30 17:08:45 -04002490 amdgpu_atombios_scratch_regs_save(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002491 pci_save_state(dev->pdev);
2492 if (suspend) {
2493 /* Shut down the device */
2494 pci_disable_device(dev->pdev);
2495 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002496 } else {
2497 r = amdgpu_asic_reset(adev);
2498 if (r)
2499 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002500 }
2501
2502 if (fbcon) {
2503 console_lock();
2504 amdgpu_fbdev_set_suspend(adev, 1);
2505 console_unlock();
2506 }
2507 return 0;
2508}
2509
2510/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002511 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002512 *
 2513 * @dev: drm dev pointer
 * @resume: re-enable and power up the PCI device when true
 * @fbcon: resume the fbdev console when true
2514 *
2515 * Bring the hw back to operating state (all asics).
2516 * Returns 0 for success or an error on failure.
2517 * Called at driver resume.
2518 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002519int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002520{
2521 struct drm_connector *connector;
2522 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002523 struct drm_crtc *crtc;
Huang Rui03161a62017-04-13 16:12:26 +08002524 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002525
2526 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2527 return 0;
2528
jimqu74b0b152016-09-07 17:09:12 +08002529 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002530 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002531
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002532 if (resume) {
2533 pci_set_power_state(dev->pdev, PCI_D0);
2534 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002535 r = pci_enable_device(dev->pdev);
Huang Rui03161a62017-04-13 16:12:26 +08002536 if (r)
2537 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002538 }
Alex Deucherd05da0e2017-06-30 17:08:45 -04002539 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002540
2541 /* post card */
Jim Quc836fec2017-02-10 15:59:59 +08002542 if (amdgpu_need_post(adev)) {
jimqu74b0b152016-09-07 17:09:12 +08002543 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2544 if (r)
2545 DRM_ERROR("amdgpu asic init failed\n");
2546 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002547
2548 r = amdgpu_resume(adev);
Rex Zhue6707212017-03-30 13:21:01 +08002549 if (r) {
Flora Cuica198522016-02-04 15:10:08 +08002550 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Huang Rui03161a62017-04-13 16:12:26 +08002551 goto unlock;
Rex Zhue6707212017-03-30 13:21:01 +08002552 }
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002553 amdgpu_fence_driver_resume(adev);
2554
Flora Cuica198522016-02-04 15:10:08 +08002555 if (resume) {
2556 r = amdgpu_ib_ring_tests(adev);
2557 if (r)
2558 DRM_ERROR("ib ring test failed (%d).\n", r);
2559 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002560
2561 r = amdgpu_late_init(adev);
Huang Rui03161a62017-04-13 16:12:26 +08002562 if (r)
2563 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002564
Alex Deucher756e6882015-10-08 00:03:36 -04002565 /* pin cursors */
2566 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2567 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2568
2569 if (amdgpu_crtc->cursor_bo) {
2570 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002571 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002572 if (r == 0) {
2573 r = amdgpu_bo_pin(aobj,
2574 AMDGPU_GEM_DOMAIN_VRAM,
2575 &amdgpu_crtc->cursor_addr);
2576 if (r != 0)
2577 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2578 amdgpu_bo_unreserve(aobj);
2579 }
2580 }
2581 }
Yong Zhaoba997702015-11-09 17:21:45 -05002582 r = amdgpu_amdkfd_resume(adev);
2583 if (r)
 2584		goto unlock;
Alex Deucher756e6882015-10-08 00:03:36 -04002585
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002586 /* blat the mode back in */
2587 if (fbcon) {
2588 drm_helper_resume_force_mode(dev);
2589 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002590 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002591 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2592 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2593 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002594 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002595 }
2596
2597 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002598
2599 /*
2600 * Most of the connector probing functions try to acquire runtime pm
2601 * refs to ensure that the GPU is powered on when connector polling is
2602 * performed. Since we're calling this from a runtime PM callback,
2603 * trying to acquire rpm refs will cause us to deadlock.
2604 *
2605 * Since we're guaranteed to be holding the rpm lock, it's safe to
2606 * temporarily disable the rpm helpers so this doesn't deadlock us.
2607 */
2608#ifdef CONFIG_PM
2609 dev->dev->power.disable_depth++;
2610#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002611 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002612#ifdef CONFIG_PM
2613 dev->dev->power.disable_depth--;
2614#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002615
Huang Rui03161a62017-04-13 16:12:26 +08002616 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002617 amdgpu_fbdev_set_suspend(adev, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002618
Huang Rui03161a62017-04-13 16:12:26 +08002619unlock:
2620 if (fbcon)
2621 console_unlock();
2622
2623 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002624}
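/*
 * Illustrative only: dev_pm_ops style callbacks are assumed to wrap the two
 * helpers above roughly as follows; the callback names here are hypothetical.
 */
#if 0
static int example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* true, true: also power down the PCI device and suspend fbcon */
	return amdgpu_device_suspend(drm_dev, true, true);
}

static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true, true);
}
#endif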
2625
Chunming Zhou63fbf422016-07-15 11:19:20 +08002626static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2627{
2628 int i;
2629 bool asic_hang = false;
2630
2631 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002632 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002633 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002634 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2635 adev->ip_blocks[i].status.hang =
2636 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2637 if (adev->ip_blocks[i].status.hang) {
2638 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002639 asic_hang = true;
2640 }
2641 }
2642 return asic_hang;
2643}
2644
Baoyou Xie4d446652016-09-18 22:09:35 +08002645static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002646{
2647 int i, r = 0;
2648
2649 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002650 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002651 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002652 if (adev->ip_blocks[i].status.hang &&
2653 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2654 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002655 if (r)
2656 return r;
2657 }
2658 }
2659
2660 return 0;
2661}
2662
Chunming Zhou35d782f2016-07-15 15:57:13 +08002663static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2664{
Alex Deucherda146d32016-10-13 16:07:03 -04002665 int i;
2666
2667 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002668 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002669 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002670 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2671 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2672 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
Ken Wang98512bb2017-09-14 16:25:19 +08002673 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2674 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
Alex Deuchera1255102016-10-13 17:41:13 -04002675 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002676 DRM_INFO("Some block need full reset!\n");
2677 return true;
2678 }
2679 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002680 }
2681 return false;
2682}
2683
2684static int amdgpu_soft_reset(struct amdgpu_device *adev)
2685{
2686 int i, r = 0;
2687
2688 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002689 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002690 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002691 if (adev->ip_blocks[i].status.hang &&
2692 adev->ip_blocks[i].version->funcs->soft_reset) {
2693 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002694 if (r)
2695 return r;
2696 }
2697 }
2698
2699 return 0;
2700}
2701
2702static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2703{
2704 int i, r = 0;
2705
2706 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002707 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002708 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002709 if (adev->ip_blocks[i].status.hang &&
2710 adev->ip_blocks[i].version->funcs->post_soft_reset)
2711 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002712 if (r)
2713 return r;
2714 }
2715
2716 return 0;
2717}
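/*
 * Illustrative only: a rough sketch (not the actual recovery path) of how the
 * soft reset helpers above are intended to chain together:
 *
 *	if (amdgpu_check_soft_reset(adev)) {		// some IP block is hung
 *		if (!amdgpu_need_full_reset(adev)) {
 *			amdgpu_pre_soft_reset(adev);	// per-block preparation
 *			amdgpu_soft_reset(adev);	// reset only the hung blocks
 *			amdgpu_post_soft_reset(adev);	// per-block cleanup
 *		} else {
 *			...fall back to a full ASIC reset...
 *		}
 *	}
 */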
2718
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002719bool amdgpu_need_backup(struct amdgpu_device *adev)
2720{
2721 if (adev->flags & AMD_IS_APU)
2722 return false;
2723
 2724	return amdgpu_lockup_timeout > 0;
2725}
2726
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002727static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2728 struct amdgpu_ring *ring,
2729 struct amdgpu_bo *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +01002730 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002731{
2732 uint32_t domain;
2733 int r;
2734
Roger.He23d2e502017-04-21 14:24:26 +08002735 if (!bo->shadow)
2736 return 0;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002737
Alex Xie1d284792017-04-24 13:53:04 -04002738 r = amdgpu_bo_reserve(bo, true);
Roger.He23d2e502017-04-21 14:24:26 +08002739 if (r)
2740 return r;
2741 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2742 /* if bo has been evicted, then no need to recover */
2743 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
Roger.He82521312017-04-21 13:08:43 +08002744 r = amdgpu_bo_validate(bo->shadow);
2745 if (r) {
2746 DRM_ERROR("bo validate failed!\n");
2747 goto err;
2748 }
2749
Roger.He23d2e502017-04-21 14:24:26 +08002750 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002751 NULL, fence, true);
Roger.He23d2e502017-04-21 14:24:26 +08002752 if (r) {
2753 DRM_ERROR("recover page table failed!\n");
2754 goto err;
2755 }
2756 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002757err:
Roger.He23d2e502017-04-21 14:24:26 +08002758 amdgpu_bo_unreserve(bo);
2759 return r;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002760}
2761
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002762/**
Monk Liua90ad3c2017-01-23 14:22:08 +08002763 * amdgpu_sriov_gpu_reset - reset the asic
2764 *
2765 * @adev: amdgpu device pointer
Monk Liu7225f872017-04-26 14:51:54 +08002766 * @job: the job that triggered the hang
Monk Liua90ad3c2017-01-23 14:22:08 +08002767 *
 2768 * Attempt to reset the GPU if it has hung (all ASICs),
 2769 * for the SR-IOV case.
2770 * Returns 0 for success or an error on failure.
2771 */
Monk Liu7225f872017-04-26 14:51:54 +08002772int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002773{
Monk Liu65781c72017-05-11 13:36:44 +08002774 int i, j, r = 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002775 int resched;
2776 struct amdgpu_bo *bo, *tmp;
2777 struct amdgpu_ring *ring;
2778 struct dma_fence *fence = NULL, *next = NULL;
2779
Monk Liu147b5982017-01-25 15:48:01 +08002780 mutex_lock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002781 atomic_inc(&adev->gpu_reset_counter);
Monk Liu3224a122017-09-15 18:57:12 +08002782 adev->in_sriov_reset = true;
Monk Liua90ad3c2017-01-23 14:22:08 +08002783
2784 /* block TTM */
2785 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2786
Monk Liu65781c72017-05-11 13:36:44 +08002787	/* start from the ring that triggered the GPU hang */
2788 j = job ? job->ring->idx : 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002789
Monk Liu65781c72017-05-11 13:36:44 +08002790 /* block scheduler */
2791 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2792 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002793 if (!ring || !ring->sched.thread)
2794 continue;
2795
2796 kthread_park(ring->sched.thread);
Monk Liua90ad3c2017-01-23 14:22:08 +08002797
Monk Liu65781c72017-05-11 13:36:44 +08002798 if (job && j != i)
2799 continue;
2800
Monk Liu4f059ec2017-05-11 13:59:15 +08002801		/* last chance to check whether the job was already removed from
Monk Liu65781c72017-05-11 13:36:44 +08002802		 * the mirror list, since we have just spent time in kthread_park() */
Monk Liu4f059ec2017-05-11 13:59:15 +08002803 if (job && list_empty(&job->base.node)) {
Monk Liu65781c72017-05-11 13:36:44 +08002804 kthread_unpark(ring->sched.thread);
2805 goto give_up_reset;
2806 }
2807
2808 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2809 amd_sched_job_kickout(&job->base);
2810
 2811		/* only do job_reset on the hung ring if @job is not NULL */
Monk Liua90ad3c2017-01-23 14:22:08 +08002812 amd_sched_hw_job_reset(&ring->sched);
Monk Liu65781c72017-05-11 13:36:44 +08002813
 2814		/* after all hw jobs are reset, hw fences are meaningless, so force completion */
2815 amdgpu_fence_driver_force_completion_ring(ring);
Monk Liua90ad3c2017-01-23 14:22:08 +08002816 }
2817
Monk Liua90ad3c2017-01-23 14:22:08 +08002818 /* request to take full control of GPU before re-initialization */
Monk Liu7225f872017-04-26 14:51:54 +08002819 if (job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002820 amdgpu_virt_reset_gpu(adev);
2821 else
2822 amdgpu_virt_request_full_gpu(adev, true);
2823
2824
2825 /* Resume IP prior to SMC */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002826 amdgpu_sriov_reinit_early(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002827
 2828	/* we need to recover the GART before resuming SMC/CP/SDMA */
2829 amdgpu_ttm_recover_gart(adev);
2830
2831 /* now we are okay to resume SMC/CP/SDMA */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002832 amdgpu_sriov_reinit_late(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002833
2834 amdgpu_irq_gpu_reset_resume_helper(adev);
2835
 2836	r = amdgpu_ib_ring_tests(adev);
 2837	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2838
2839 /* release full control of GPU after ib test */
2840 amdgpu_virt_release_full_gpu(adev, true);
2841
 2842	DRM_INFO("recovering vram bo from shadow\n");
2843
2844 ring = adev->mman.buffer_funcs_ring;
2845 mutex_lock(&adev->shadow_list_lock);
2846 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002847 next = NULL;
Monk Liua90ad3c2017-01-23 14:22:08 +08002848 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2849 if (fence) {
2850 r = dma_fence_wait(fence, false);
2851 if (r) {
 2852				WARN(r, "recovery from shadow didn't complete\n");
2853 break;
2854 }
2855 }
2856
2857 dma_fence_put(fence);
2858 fence = next;
2859 }
2860 mutex_unlock(&adev->shadow_list_lock);
2861
2862 if (fence) {
2863 r = dma_fence_wait(fence, false);
2864 if (r)
 2865			WARN(r, "recovery from shadow didn't complete\n");
2866 }
2867 dma_fence_put(fence);
2868
Monk Liu65781c72017-05-11 13:36:44 +08002869 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2870 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002871 if (!ring || !ring->sched.thread)
2872 continue;
2873
Monk Liu65781c72017-05-11 13:36:44 +08002874 if (job && j != i) {
2875 kthread_unpark(ring->sched.thread);
2876 continue;
2877 }
2878
Monk Liua90ad3c2017-01-23 14:22:08 +08002879 amd_sched_job_recovery(&ring->sched);
2880 kthread_unpark(ring->sched.thread);
2881 }
2882
2883 drm_helper_resume_force_mode(adev->ddev);
Monk Liu65781c72017-05-11 13:36:44 +08002884give_up_reset:
Monk Liua90ad3c2017-01-23 14:22:08 +08002885 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2886 if (r) {
 2887		/* bad news, how do we tell userspace? */
2888 dev_info(adev->dev, "GPU reset failed\n");
Monk Liu65781c72017-05-11 13:36:44 +08002889 } else {
 2890		dev_info(adev->dev, "GPU reset succeeded!\n");
Monk Liua90ad3c2017-01-23 14:22:08 +08002891 }
2892
Monk Liu3224a122017-09-15 18:57:12 +08002893 adev->in_sriov_reset = false;
Monk Liu147b5982017-01-25 15:48:01 +08002894 mutex_unlock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002895 return r;
2896}
2897
2898/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002899 * amdgpu_gpu_reset - reset the asic
2900 *
2901 * @adev: amdgpu device pointer
2902 *
 2903 * Attempt to reset the GPU if it has hung (all ASICs).
2904 * Returns 0 for success or an error on failure.
2905 */
2906int amdgpu_gpu_reset(struct amdgpu_device *adev)
2907{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002908 int i, r;
2909 int resched;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002910 bool need_full_reset, vram_lost = false;
Xiangliang Yufb140b22016-12-17 22:48:57 +08002911
Chunming Zhou63fbf422016-07-15 11:19:20 +08002912 if (!amdgpu_check_soft_reset(adev)) {
2913 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2914 return 0;
2915 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002916
Marek Olšákd94aed52015-05-05 21:13:49 +02002917 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002918
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002919 /* block TTM */
2920 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2921
Chunming Zhou0875dc92016-06-12 15:41:58 +08002922 /* block scheduler */
2923 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2924 struct amdgpu_ring *ring = adev->rings[i];
2925
Chunming Zhou51687752017-04-24 17:09:15 +08002926 if (!ring || !ring->sched.thread)
Chunming Zhou0875dc92016-06-12 15:41:58 +08002927 continue;
2928 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002929 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002930 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002931	/* after all hw jobs are reset, hw fences are meaningless, so force completion */
2932 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002933
Chunming Zhou35d782f2016-07-15 15:57:13 +08002934 need_full_reset = amdgpu_need_full_reset(adev);
2935
2936 if (!need_full_reset) {
2937 amdgpu_pre_soft_reset(adev);
2938 r = amdgpu_soft_reset(adev);
2939 amdgpu_post_soft_reset(adev);
2940 if (r || amdgpu_check_soft_reset(adev)) {
 2941			DRM_INFO("soft reset failed, will fall back to full reset!\n");
2942 need_full_reset = true;
2943 }
2944 }
2945
2946 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002947 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002948
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002949retry:
Alex Deucherd05da0e2017-06-30 17:08:45 -04002950 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002951 r = amdgpu_asic_reset(adev);
Alex Deucherd05da0e2017-06-30 17:08:45 -04002952 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002953 /* post card */
2954 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002955
Chunming Zhou35d782f2016-07-15 15:57:13 +08002956 if (!r) {
2957 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Chunming Zhoufcf06492017-05-05 10:33:33 +08002958 r = amdgpu_resume_phase1(adev);
2959 if (r)
2960 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002961 vram_lost = amdgpu_check_vram_lost(adev);
Chunming Zhouf1892132017-05-15 16:48:27 +08002962 if (vram_lost) {
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002963 DRM_ERROR("VRAM is lost!\n");
Chunming Zhouf1892132017-05-15 16:48:27 +08002964 atomic_inc(&adev->vram_lost_counter);
2965 }
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002966 r = amdgpu_ttm_recover_gart(adev);
2967 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002968 goto out;
2969 r = amdgpu_resume_phase2(adev);
2970 if (r)
2971 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002972 if (vram_lost)
2973 amdgpu_fill_reset_magic(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002974 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08002975 }
2976out:
2977 if (!r) {
2978 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002979 r = amdgpu_ib_ring_tests(adev);
2980 if (r) {
2981 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002982 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002983 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002984 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002985 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002986 /**
 2987		 * recover VM page tables, since we cannot rely on VRAM being
 2988		 * consistent after a full GPU reset.
2989 */
2990 if (need_full_reset && amdgpu_need_backup(adev)) {
2991 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2992 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002993 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002994
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002995			DRM_INFO("recovering vram bo from shadow\n");
2996 mutex_lock(&adev->shadow_list_lock);
2997 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002998 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002999 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3000 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01003001 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003002 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08003003						WARN(r, "recovery from shadow didn't complete\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003004 break;
3005 }
3006 }
3007
Chris Wilsonf54d1862016-10-25 13:00:45 +01003008 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003009 fence = next;
3010 }
3011 mutex_unlock(&adev->shadow_list_lock);
3012 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01003013 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003014 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08003015					WARN(r, "recovery from shadow didn't complete\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003016 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01003017 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003018 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003019 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3020 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08003021
3022 if (!ring || !ring->sched.thread)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003023 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003024
Chunming Zhouaa1c8902016-06-30 13:56:02 +08003025 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08003026 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003027 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003028 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08003029 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Alex Deuchere23b74a2017-09-28 09:47:32 -04003030 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003031 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou51687752017-04-24 17:09:15 +08003032 if (adev->rings[i] && adev->rings[i]->sched.thread) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08003033 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08003034 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003035 }
3036 }
3037
3038 drm_helper_resume_force_mode(adev->ddev);
3039
3040 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
Gavin Wan89041942017-06-23 13:55:15 -04003041 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003042		/* bad news, how do we tell userspace? */
3043 dev_info(adev->dev, "GPU reset failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04003044 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
Gavin Wan89041942017-06-23 13:55:15 -04003045 }
3046 else {
Chunming Zhou6643be62017-05-05 10:50:09 +08003047		dev_info(adev->dev, "GPU reset succeeded!\n");
Gavin Wan89041942017-06-23 13:55:15 -04003048 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003049
Gavin Wan89041942017-06-23 13:55:15 -04003050 amdgpu_vf_error_trans_all(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003051 return r;
3052}
3053
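/*
 * Fill in the PCIe gen and lane-width capability masks used by power
 * management.  The amdgpu_pcie_gen_cap / amdgpu_pcie_lane_cap module
 * parameters take precedence; devices on a root bus (e.g. APUs) fall back
 * to the defaults, otherwise the masks are derived from the DRM PCIe
 * helpers.
 */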
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003054void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3055{
3056 u32 mask;
3057 int ret;
3058
Alex Deuchercd474ba2016-02-04 10:21:23 -05003059 if (amdgpu_pcie_gen_cap)
3060 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3061
3062 if (amdgpu_pcie_lane_cap)
3063 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3064
3065 /* covers APUs as well */
3066 if (pci_is_root_bus(adev->pdev->bus)) {
3067 if (adev->pm.pcie_gen_mask == 0)
3068 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3069 if (adev->pm.pcie_mlw_mask == 0)
3070 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003071 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003072 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05003073
3074 if (adev->pm.pcie_gen_mask == 0) {
3075 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3076 if (!ret) {
3077 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3078 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3079 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3080
3081 if (mask & DRM_PCIE_SPEED_25)
3082 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3083 if (mask & DRM_PCIE_SPEED_50)
3084 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3085 if (mask & DRM_PCIE_SPEED_80)
3086 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3087 } else {
3088 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3089 }
3090 }
3091 if (adev->pm.pcie_mlw_mask == 0) {
3092 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3093 if (!ret) {
3094 switch (mask) {
3095 case 32:
3096 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3097 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3098 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3099 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3100 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3101 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3102 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3103 break;
3104 case 16:
3105 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3106 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3107 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3108 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3109 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3110 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3111 break;
3112 case 12:
3113 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3114 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3115 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3116 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3117 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3118 break;
3119 case 8:
3120 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3121 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3122 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3123 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3124 break;
3125 case 4:
3126 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3127 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3128 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3129 break;
3130 case 2:
3131 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3132 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3133 break;
3134 case 1:
3135 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3136 break;
3137 default:
3138 break;
3139 }
3140 } else {
3141 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003142 }
3143 }
3144}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003145
3146/*
3147 * Debugfs
3148 */
3149int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04003150 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003151 unsigned nfiles)
3152{
3153 unsigned i;
3154
3155 for (i = 0; i < adev->debugfs_count; i++) {
3156 if (adev->debugfs[i].files == files) {
3157 /* Already registered */
3158 return 0;
3159 }
3160 }
3161
3162 i = adev->debugfs_count + 1;
3163 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3164 DRM_ERROR("Reached maximum number of debugfs components.\n");
3165 DRM_ERROR("Report so we increase "
3166 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3167 return -EINVAL;
3168 }
3169 adev->debugfs[adev->debugfs_count].files = files;
3170 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3171 adev->debugfs_count = i;
3172#if defined(CONFIG_DEBUG_FS)
3173 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003174 adev->ddev->primary->debugfs_root,
3175 adev->ddev->primary);
3176#endif
3177 return 0;
3178}
3179
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003180#if defined(CONFIG_DEBUG_FS)
3181
3182static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3183 size_t size, loff_t *pos)
3184{
Al Viro45063092016-12-04 18:24:56 -05003185 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003186 ssize_t result = 0;
3187 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04003188 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04003189 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003190
3191 if (size & 0x3 || *pos & 0x3)
3192 return -EINVAL;
3193
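	/*
	 * The file offset encodes more than the register address.  A sketch
	 * of the layout, as inferred from the decoding below:
	 *   bits  0..21  register offset (bytes)
	 *   bit  23      take the PM power-gating lock
	 *   bits 24..33  SE index   (0x3FF = broadcast)
	 *   bits 34..43  SH index   (0x3FF = broadcast)
	 *   bits 44..53  instance   (0x3FF = broadcast)
	 *   bit  62      apply the SE/SH/instance bank selection
	 */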
Tom St Denisbd122672016-07-28 09:39:22 -04003194 /* are we reading registers for which a PG lock is necessary? */
3195 pm_pg_lock = (*pos >> 23) & 1;
3196
Tom St Denis566281592016-06-27 11:55:07 -04003197 if (*pos & (1ULL << 62)) {
3198 se_bank = (*pos >> 24) & 0x3FF;
3199 sh_bank = (*pos >> 34) & 0x3FF;
3200 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04003201
3202 if (se_bank == 0x3FF)
3203 se_bank = 0xFFFFFFFF;
3204 if (sh_bank == 0x3FF)
3205 sh_bank = 0xFFFFFFFF;
3206 if (instance_bank == 0x3FF)
3207 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04003208 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04003209 } else {
3210 use_bank = 0;
3211 }
3212
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003213 *pos &= (1UL << 22) - 1;
Tom St Denisbd122672016-07-28 09:39:22 -04003214
Tom St Denis566281592016-06-27 11:55:07 -04003215 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04003216 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3217 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04003218 return -EINVAL;
3219 mutex_lock(&adev->grbm_idx_mutex);
3220 amdgpu_gfx_select_se_sh(adev, se_bank,
3221 sh_bank, instance_bank);
3222 }
3223
Tom St Denisbd122672016-07-28 09:39:22 -04003224 if (pm_pg_lock)
3225 mutex_lock(&adev->pm.mutex);
3226
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003227 while (size) {
3228 uint32_t value;
3229
3230 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04003231 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003232
3233 value = RREG32(*pos >> 2);
3234 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04003235 if (r) {
3236 result = r;
3237 goto end;
3238 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003239
3240 result += 4;
3241 buf += 4;
3242 *pos += 4;
3243 size -= 4;
3244 }
3245
Tom St Denis566281592016-06-27 11:55:07 -04003246end:
3247 if (use_bank) {
3248 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3249 mutex_unlock(&adev->grbm_idx_mutex);
3250 }
3251
Tom St Denisbd122672016-07-28 09:39:22 -04003252 if (pm_pg_lock)
3253 mutex_unlock(&adev->pm.mutex);
3254
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003255 return result;
3256}
3257
3258static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3259 size_t size, loff_t *pos)
3260{
Al Viro45063092016-12-04 18:24:56 -05003261 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003262 ssize_t result = 0;
3263 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04003264 bool pm_pg_lock, use_bank;
3265 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003266
3267 if (size & 0x3 || *pos & 0x3)
3268 return -EINVAL;
3269
Tom St Denis394fdde2016-10-10 07:31:23 -04003270	/* are we writing registers for which a PG lock is necessary? */
3271 pm_pg_lock = (*pos >> 23) & 1;
3272
3273 if (*pos & (1ULL << 62)) {
3274 se_bank = (*pos >> 24) & 0x3FF;
3275 sh_bank = (*pos >> 34) & 0x3FF;
3276 instance_bank = (*pos >> 44) & 0x3FF;
3277
3278 if (se_bank == 0x3FF)
3279 se_bank = 0xFFFFFFFF;
3280 if (sh_bank == 0x3FF)
3281 sh_bank = 0xFFFFFFFF;
3282 if (instance_bank == 0x3FF)
3283 instance_bank = 0xFFFFFFFF;
3284 use_bank = 1;
3285 } else {
3286 use_bank = 0;
3287 }
3288
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003289 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04003290
3291 if (use_bank) {
3292 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3293 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3294 return -EINVAL;
3295 mutex_lock(&adev->grbm_idx_mutex);
3296 amdgpu_gfx_select_se_sh(adev, se_bank,
3297 sh_bank, instance_bank);
3298 }
3299
3300 if (pm_pg_lock)
3301 mutex_lock(&adev->pm.mutex);
3302
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003303 while (size) {
3304 uint32_t value;
3305
3306 if (*pos > adev->rmmio_size)
3307 return result;
3308
3309 r = get_user(value, (uint32_t *)buf);
3310 if (r)
3311 return r;
3312
3313 WREG32(*pos >> 2, value);
3314
3315 result += 4;
3316 buf += 4;
3317 *pos += 4;
3318 size -= 4;
3319 }
3320
Tom St Denis394fdde2016-10-10 07:31:23 -04003321 if (use_bank) {
3322 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3323 mutex_unlock(&adev->grbm_idx_mutex);
3324 }
3325
3326 if (pm_pg_lock)
3327 mutex_unlock(&adev->pm.mutex);
3328
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003329 return result;
3330}
3331
Tom St Denisadcec282016-04-15 13:08:44 -04003332static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3333 size_t size, loff_t *pos)
3334{
Al Viro45063092016-12-04 18:24:56 -05003335 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003336 ssize_t result = 0;
3337 int r;
3338
3339 if (size & 0x3 || *pos & 0x3)
3340 return -EINVAL;
3341
3342 while (size) {
3343 uint32_t value;
3344
3345 value = RREG32_PCIE(*pos >> 2);
3346 r = put_user(value, (uint32_t *)buf);
3347 if (r)
3348 return r;
3349
3350 result += 4;
3351 buf += 4;
3352 *pos += 4;
3353 size -= 4;
3354 }
3355
3356 return result;
3357}
3358
3359static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3360 size_t size, loff_t *pos)
3361{
Al Viro45063092016-12-04 18:24:56 -05003362 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003363 ssize_t result = 0;
3364 int r;
3365
3366 if (size & 0x3 || *pos & 0x3)
3367 return -EINVAL;
3368
3369 while (size) {
3370 uint32_t value;
3371
3372 r = get_user(value, (uint32_t *)buf);
3373 if (r)
3374 return r;
3375
3376 WREG32_PCIE(*pos >> 2, value);
3377
3378 result += 4;
3379 buf += 4;
3380 *pos += 4;
3381 size -= 4;
3382 }
3383
3384 return result;
3385}
3386
3387static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3388 size_t size, loff_t *pos)
3389{
Al Viro45063092016-12-04 18:24:56 -05003390 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003391 ssize_t result = 0;
3392 int r;
3393
3394 if (size & 0x3 || *pos & 0x3)
3395 return -EINVAL;
3396
3397 while (size) {
3398 uint32_t value;
3399
3400 value = RREG32_DIDT(*pos >> 2);
3401 r = put_user(value, (uint32_t *)buf);
3402 if (r)
3403 return r;
3404
3405 result += 4;
3406 buf += 4;
3407 *pos += 4;
3408 size -= 4;
3409 }
3410
3411 return result;
3412}
3413
3414static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3415 size_t size, loff_t *pos)
3416{
Al Viro45063092016-12-04 18:24:56 -05003417 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003418 ssize_t result = 0;
3419 int r;
3420
3421 if (size & 0x3 || *pos & 0x3)
3422 return -EINVAL;
3423
3424 while (size) {
3425 uint32_t value;
3426
3427 r = get_user(value, (uint32_t *)buf);
3428 if (r)
3429 return r;
3430
3431 WREG32_DIDT(*pos >> 2, value);
3432
3433 result += 4;
3434 buf += 4;
3435 *pos += 4;
3436 size -= 4;
3437 }
3438
3439 return result;
3440}
3441
3442static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3443 size_t size, loff_t *pos)
3444{
Al Viro45063092016-12-04 18:24:56 -05003445 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003446 ssize_t result = 0;
3447 int r;
3448
3449 if (size & 0x3 || *pos & 0x3)
3450 return -EINVAL;
3451
3452 while (size) {
3453 uint32_t value;
3454
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003455 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04003456 r = put_user(value, (uint32_t *)buf);
3457 if (r)
3458 return r;
3459
3460 result += 4;
3461 buf += 4;
3462 *pos += 4;
3463 size -= 4;
3464 }
3465
3466 return result;
3467}
3468
3469static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3470 size_t size, loff_t *pos)
3471{
Al Viro45063092016-12-04 18:24:56 -05003472 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003473 ssize_t result = 0;
3474 int r;
3475
3476 if (size & 0x3 || *pos & 0x3)
3477 return -EINVAL;
3478
3479 while (size) {
3480 uint32_t value;
3481
3482 r = get_user(value, (uint32_t *)buf);
3483 if (r)
3484 return r;
3485
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003486 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04003487
3488 result += 4;
3489 buf += 4;
3490 *pos += 4;
3491 size -= 4;
3492 }
3493
3494 return result;
3495}
3496
Tom St Denis1e051412016-06-27 09:57:18 -04003497static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3498 size_t size, loff_t *pos)
3499{
Al Viro45063092016-12-04 18:24:56 -05003500 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04003501 ssize_t result = 0;
3502 int r;
3503 uint32_t *config, no_regs = 0;
3504
3505 if (size & 0x3 || *pos & 0x3)
3506 return -EINVAL;
3507
Markus Elfringecab7662016-09-18 17:00:52 +02003508 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04003509 if (!config)
3510 return -ENOMEM;
3511
3512 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05003513 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04003514 config[no_regs++] = adev->gfx.config.max_shader_engines;
3515 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3516 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3517 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3518 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3519 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3520 config[no_regs++] = adev->gfx.config.max_gprs;
3521 config[no_regs++] = adev->gfx.config.max_gs_threads;
3522 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3523 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3524 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3525 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3526 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3527 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3528 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3529 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3530 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3531 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3532 config[no_regs++] = adev->gfx.config.num_gpus;
3533 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3534 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3535 config[no_regs++] = adev->gfx.config.gb_addr_config;
3536 config[no_regs++] = adev->gfx.config.num_rbs;
3537
Tom St Denis89a8f302016-08-12 15:14:31 -04003538 /* rev==1 */
3539 config[no_regs++] = adev->rev_id;
3540 config[no_regs++] = adev->pg_flags;
3541 config[no_regs++] = adev->cg_flags;
3542
Tom St Denise9f11dc2016-08-17 12:00:51 -04003543 /* rev==2 */
3544 config[no_regs++] = adev->family;
3545 config[no_regs++] = adev->external_rev_id;
3546
Tom St Denis9a999352017-01-18 13:01:25 -05003547 /* rev==3 */
3548 config[no_regs++] = adev->pdev->device;
3549 config[no_regs++] = adev->pdev->revision;
3550 config[no_regs++] = adev->pdev->subsystem_device;
3551 config[no_regs++] = adev->pdev->subsystem_vendor;
3552
Tom St Denis1e051412016-06-27 09:57:18 -04003553 while (size && (*pos < no_regs * 4)) {
3554 uint32_t value;
3555
3556 value = config[*pos >> 2];
3557 r = put_user(value, (uint32_t *)buf);
3558 if (r) {
3559 kfree(config);
3560 return r;
3561 }
3562
3563 result += 4;
3564 buf += 4;
3565 *pos += 4;
3566 size -= 4;
3567 }
3568
3569 kfree(config);
3570 return result;
3571}
3572
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003573static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3574 size_t size, loff_t *pos)
3575{
Al Viro45063092016-12-04 18:24:56 -05003576 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003577 int idx, x, outsize, r, valuesize;
3578 uint32_t values[16];
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003579
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003580 if (size & 3 || *pos & 0x3)
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003581 return -EINVAL;
3582
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003583 if (amdgpu_dpm == 0)
3584 return -EINVAL;
3585
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003586 /* convert offset to sensor number */
3587 idx = *pos >> 2;
3588
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003589 valuesize = sizeof(values);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003590 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
Rex Zhucd4d7462017-09-06 18:43:52 +08003591 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003592 else
3593 return -EINVAL;
3594
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003595 if (size > valuesize)
3596 return -EINVAL;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003597
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003598 outsize = 0;
3599 x = 0;
3600 if (!r) {
3601 while (size) {
3602 r = put_user(values[x++], (int32_t *)buf);
3603 buf += 4;
3604 size -= 4;
3605 outsize += 4;
3606 }
3607 }
3608
3609 return !r ? outsize : r;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003610}
Tom St Denis1e051412016-06-27 09:57:18 -04003611
Tom St Denis273d7aa2016-10-11 14:48:55 -04003612static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3613 size_t size, loff_t *pos)
3614{
3615 struct amdgpu_device *adev = f->f_inode->i_private;
3616 int r, x;
 3617	ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003618 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003619
3620 if (size & 3 || *pos & 3)
3621 return -EINVAL;
3622
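	/*
	 * The file offset packs the byte offset into the wave data (bits
	 * 0..6) together with SE/SH/CU/wave/SIMD selectors; see the shift
	 * and mask values used just below for the exact field positions.
	 */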
3623 /* decode offset */
3624 offset = (*pos & 0x7F);
3625 se = ((*pos >> 7) & 0xFF);
3626 sh = ((*pos >> 15) & 0xFF);
3627 cu = ((*pos >> 23) & 0xFF);
3628 wave = ((*pos >> 31) & 0xFF);
3629 simd = ((*pos >> 37) & 0xFF);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003630
3631 /* switch to the specific se/sh/cu */
3632 mutex_lock(&adev->grbm_idx_mutex);
3633 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3634
3635 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003636 if (adev->gfx.funcs->read_wave_data)
3637 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003638
3639 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3640 mutex_unlock(&adev->grbm_idx_mutex);
3641
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003642 if (!x)
3643 return -EINVAL;
3644
Tom St Denis472259f2016-10-14 09:49:09 -04003645 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003646 uint32_t value;
3647
Tom St Denis472259f2016-10-14 09:49:09 -04003648 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003649 r = put_user(value, (uint32_t *)buf);
3650 if (r)
3651 return r;
3652
3653 result += 4;
3654 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003655 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003656 size -= 4;
3657 }
3658
3659 return result;
3660}
3661
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003662static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3663 size_t size, loff_t *pos)
3664{
3665 struct amdgpu_device *adev = f->f_inode->i_private;
3666 int r;
3667 ssize_t result = 0;
3668 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3669
3670 if (size & 3 || *pos & 3)
3671 return -EINVAL;
3672
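	/*
	 * A sketch of the offset layout, as inferred from the decoding below:
	 *   bits  0..11  dword offset into the GPR data
	 *   bits 12..19  SE, 20..27 SH, 28..35 CU, 36..43 wave, 44..51 SIMD,
	 *   bits 52..59  thread, bit 60 selects SGPRs (1) vs VGPRs (0)
	 */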
3673 /* decode offset */
3674 offset = (*pos & 0xFFF); /* in dwords */
3675 se = ((*pos >> 12) & 0xFF);
3676 sh = ((*pos >> 20) & 0xFF);
3677 cu = ((*pos >> 28) & 0xFF);
3678 wave = ((*pos >> 36) & 0xFF);
3679 simd = ((*pos >> 44) & 0xFF);
3680 thread = ((*pos >> 52) & 0xFF);
3681 bank = ((*pos >> 60) & 1);
3682
3683 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3684 if (!data)
3685 return -ENOMEM;
3686
3687 /* switch to the specific se/sh/cu */
3688 mutex_lock(&adev->grbm_idx_mutex);
3689 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3690
3691 if (bank == 0) {
3692 if (adev->gfx.funcs->read_wave_vgprs)
3693 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3694 } else {
3695 if (adev->gfx.funcs->read_wave_sgprs)
3696 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3697 }
3698
3699 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3700 mutex_unlock(&adev->grbm_idx_mutex);
3701
3702 while (size) {
3703 uint32_t value;
3704
3705 value = data[offset++];
3706 r = put_user(value, (uint32_t *)buf);
3707 if (r) {
3708 result = r;
3709 goto err;
3710 }
3711
3712 result += 4;
3713 buf += 4;
3714 size -= 4;
3715 }
3716
3717err:
3718 kfree(data);
3719 return result;
3720}
3721
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003722static const struct file_operations amdgpu_debugfs_regs_fops = {
3723 .owner = THIS_MODULE,
3724 .read = amdgpu_debugfs_regs_read,
3725 .write = amdgpu_debugfs_regs_write,
3726 .llseek = default_llseek
3727};
Tom St Denisadcec282016-04-15 13:08:44 -04003728static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3729 .owner = THIS_MODULE,
3730 .read = amdgpu_debugfs_regs_didt_read,
3731 .write = amdgpu_debugfs_regs_didt_write,
3732 .llseek = default_llseek
3733};
3734static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3735 .owner = THIS_MODULE,
3736 .read = amdgpu_debugfs_regs_pcie_read,
3737 .write = amdgpu_debugfs_regs_pcie_write,
3738 .llseek = default_llseek
3739};
3740static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3741 .owner = THIS_MODULE,
3742 .read = amdgpu_debugfs_regs_smc_read,
3743 .write = amdgpu_debugfs_regs_smc_write,
3744 .llseek = default_llseek
3745};
3746
Tom St Denis1e051412016-06-27 09:57:18 -04003747static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3748 .owner = THIS_MODULE,
3749 .read = amdgpu_debugfs_gca_config_read,
3750 .llseek = default_llseek
3751};
3752
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003753static const struct file_operations amdgpu_debugfs_sensors_fops = {
3754 .owner = THIS_MODULE,
3755 .read = amdgpu_debugfs_sensor_read,
3756 .llseek = default_llseek
3757};
3758
Tom St Denis273d7aa2016-10-11 14:48:55 -04003759static const struct file_operations amdgpu_debugfs_wave_fops = {
3760 .owner = THIS_MODULE,
3761 .read = amdgpu_debugfs_wave_read,
3762 .llseek = default_llseek
3763};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003764static const struct file_operations amdgpu_debugfs_gpr_fops = {
3765 .owner = THIS_MODULE,
3766 .read = amdgpu_debugfs_gpr_read,
3767 .llseek = default_llseek
3768};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003769
Tom St Denisadcec282016-04-15 13:08:44 -04003770static const struct file_operations *debugfs_regs[] = {
3771 &amdgpu_debugfs_regs_fops,
3772 &amdgpu_debugfs_regs_didt_fops,
3773 &amdgpu_debugfs_regs_pcie_fops,
3774 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003775 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003776 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003777 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003778 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003779};
3780
3781static const char *debugfs_regs_names[] = {
3782 "amdgpu_regs",
3783 "amdgpu_regs_didt",
3784 "amdgpu_regs_pcie",
3785 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003786 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003787 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003788 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003789 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003790};
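/*
 * The files above are created under the DRM debugfs directory of the
 * device.  A hypothetical example of reading a single MMIO register from
 * userspace (assuming debugfs is mounted at /sys/kernel/debug and the
 * device is DRM minor 0):
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 \
 *      skip=$((0x8010 / 4)) 2>/dev/null | xxd
 *
 * The register byte offset 0x8010 here is purely illustrative.
 */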
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003791
3792static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3793{
3794 struct drm_minor *minor = adev->ddev->primary;
3795 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003796 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003797
Tom St Denisadcec282016-04-15 13:08:44 -04003798 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3799 ent = debugfs_create_file(debugfs_regs_names[i],
3800 S_IFREG | S_IRUGO, root,
3801 adev, debugfs_regs[i]);
3802 if (IS_ERR(ent)) {
3803 for (j = 0; j < i; j++) {
 3804				debugfs_remove(adev->debugfs_regs[j]);
 3805				adev->debugfs_regs[j] = NULL;
3806 }
3807 return PTR_ERR(ent);
3808 }
3809
3810 if (!i)
3811 i_size_write(ent->d_inode, adev->rmmio_size);
3812 adev->debugfs_regs[i] = ent;
3813 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003814
3815 return 0;
3816}
3817
3818static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3819{
Tom St Denisadcec282016-04-15 13:08:44 -04003820 unsigned i;
3821
3822 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3823 if (adev->debugfs_regs[i]) {
3824 debugfs_remove(adev->debugfs_regs[i]);
3825 adev->debugfs_regs[i] = NULL;
3826 }
3827 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003828}
3829
Huang Rui4f0955f2017-05-10 23:04:06 +08003830static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3831{
3832 struct drm_info_node *node = (struct drm_info_node *) m->private;
3833 struct drm_device *dev = node->minor->dev;
3834 struct amdgpu_device *adev = dev->dev_private;
3835 int r = 0, i;
3836
 3837	/* park the scheduler threads */
3838 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3839 struct amdgpu_ring *ring = adev->rings[i];
3840
3841 if (!ring || !ring->sched.thread)
3842 continue;
3843 kthread_park(ring->sched.thread);
3844 }
3845
3846 seq_printf(m, "run ib test:\n");
3847 r = amdgpu_ib_ring_tests(adev);
3848 if (r)
3849 seq_printf(m, "ib ring tests failed (%d).\n", r);
3850 else
3851 seq_printf(m, "ib ring tests passed.\n");
3852
 3853	/* restart the scheduler threads */
3854 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3855 struct amdgpu_ring *ring = adev->rings[i];
3856
3857 if (!ring || !ring->sched.thread)
3858 continue;
3859 kthread_unpark(ring->sched.thread);
3860 }
3861
3862 return 0;
3863}
3864
3865static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3866 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3867};
3868
3869static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3870{
3871 return amdgpu_debugfs_add_files(adev,
3872 amdgpu_debugfs_test_ib_ring_list, 1);
3873}
3874
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003875int amdgpu_debugfs_init(struct drm_minor *minor)
3876{
3877 return 0;
3878}
Kent Russelldb95e212017-08-22 12:31:43 -04003879
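/* Dump the raw VBIOS image that was cached in adev->bios at init time. */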
3880static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3881{
3882 struct drm_info_node *node = (struct drm_info_node *) m->private;
3883 struct drm_device *dev = node->minor->dev;
3884 struct amdgpu_device *adev = dev->dev_private;
3885
3886 seq_write(m, adev->bios, adev->bios_size);
3887 return 0;
3888}
3889
Kent Russelldb95e212017-08-22 12:31:43 -04003890static const struct drm_info_list amdgpu_vbios_dump_list[] = {
3891 {"amdgpu_vbios",
3892 amdgpu_debugfs_get_vbios_dump,
3893 0, NULL},
3894};
3895
Kent Russelldb95e212017-08-22 12:31:43 -04003896static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3897{
3898 return amdgpu_debugfs_add_files(adev,
3899 amdgpu_vbios_dump_list, 1);
3900}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003901#else
Arnd Bergmann27bad5b2017-06-21 23:51:02 +02003902static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
Huang Rui4f0955f2017-05-10 23:04:06 +08003903{
3904 return 0;
3905}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003906static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3907{
3908 return 0;
3909}
Kent Russelldb95e212017-08-22 12:31:43 -04003910static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3911{
3912 return 0;
3913}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003914static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003915#endif