/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

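/*
 * Illustrative sketch (not part of the original file): registers that fall
 * outside the mapped MMIO aperture are reached indirectly by writing the
 * register offset to mmMM_INDEX and then accessing mmMM_DATA, which is why
 * the helpers above serialize on mmio_idx_lock. Callers normally go through
 * the RREG32()/WREG32() macros rather than calling these helpers directly,
 * e.g.:
 *
 *	u32 tmp = RREG32(mmSOME_REG);	// mmSOME_REG is hypothetical
 *	tmp |= SOME_FIELD_MASK;		// hypothetical field mask
 *	WREG32(mmSOME_REG, tmp);
 */
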
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

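/*
 * Illustrative sketch (not part of the original file): ring code normally
 * reaches these helpers through the RDOORBELL32()/WDOORBELL32()/
 * WDOORBELL64() wrappers to kick the engine after moving its write
 * pointer, e.g.:
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */
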
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

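/*
 * Illustrative sketch (hypothetical values): golden register tables are
 * flat arrays of {offset, AND mask, OR mask} triples; an AND mask of
 * 0xffffffff means "write the OR value directly". A caller looks like:
 *
 *	static const u32 demo_golden_settings[] = {
 *		mmSOME_REG, 0x0000ff00, 0x00001200,	// hypothetical entry
 *	};
 *
 *	amdgpu_program_register_sequence(adev, demo_golden_settings,
 *					 ARRAY_SIZE(demo_golden_settings));
 */
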
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK).
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

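/*
 * Illustrative sketch (hypothetical caller): amdkfd queries this once at
 * init to learn which part of the doorbell BAR it may use:
 *
 *	phys_addr_t base;
 *	size_t size, offset;
 *
 *	amdgpu_doorbell_get_kfd_info(adev, &base, &size, &offset);
 *	// amdkfd may then use doorbells in [base + offset, base + size)
 */
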
469/*
470 * amdgpu_wb_*()
Alex Xie455a7bc2017-05-08 21:36:03 -0400471 * Writeback is the method by which the GPU updates special pages in memory
Alex Xieea81a172017-05-08 13:41:11 -0400472 * with the status of certain GPU events (fences, ring pointers,etc.).
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400473 */
474
475/**
476 * amdgpu_wb_fini - Disable Writeback and free memory
477 *
478 * @adev: amdgpu_device pointer
479 *
480 * Disables Writeback and frees the Writeback memory (all asics).
481 * Used at driver shutdown.
482 */
483static void amdgpu_wb_fini(struct amdgpu_device *adev)
484{
485 if (adev->wb.wb_obj) {
Alex Deuchera76ed482016-10-21 15:30:36 -0400486 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
487 &adev->wb.gpu_addr,
488 (void **)&adev->wb.wb);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400489 adev->wb.wb_obj = NULL;
490 }
491}
492
493/**
494 * amdgpu_wb_init- Init Writeback driver info and allocate memory
495 *
496 * @adev: amdgpu_device pointer
497 *
Alex Xie455a7bc2017-05-08 21:36:03 -0400498 * Initializes writeback and allocates writeback memory (all asics).
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400499 * Used at driver startup.
500 * Returns 0 on success or an -error on failure.
501 */
502static int amdgpu_wb_init(struct amdgpu_device *adev)
503{
504 int r;
505
506 if (adev->wb.wb_obj == NULL) {
Alex Deucher97407b62017-07-28 12:14:15 -0400507 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
508 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
Alex Deuchera76ed482016-10-21 15:30:36 -0400509 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
510 &adev->wb.wb_obj, &adev->wb.gpu_addr,
511 (void **)&adev->wb.wb);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400512 if (r) {
513 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
514 return r;
515 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400516
517 adev->wb.num_wb = AMDGPU_MAX_WB;
518 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
519
520 /* clear wb memory */
Huang Rui60a970a62017-03-15 10:13:32 +0800521 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400522 }
523
524 return 0;
525}
526
527/**
528 * amdgpu_wb_get - Allocate a wb entry
529 *
530 * @adev: amdgpu_device pointer
531 * @wb: wb index
532 *
533 * Allocate a wb slot for use by the driver (all asics).
534 * Returns 0 on success or -EINVAL on failure.
535 */
536int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
537{
538 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
Alex Deucher97407b62017-07-28 12:14:15 -0400539
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400540 if (offset < adev->wb.num_wb) {
541 __set_bit(offset, adev->wb.used);
Alex Deucher97407b62017-07-28 12:14:15 -0400542 *wb = offset * 8; /* convert to dw offset */
Monk Liu0915fdb2017-06-19 10:19:41 -0400543 return 0;
544 } else {
545 return -EINVAL;
546 }
547}
548
Ken Wang70142852016-03-18 15:08:49 +0800549/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400550 * amdgpu_wb_free - Free a wb entry
551 *
552 * @adev: amdgpu_device pointer
553 * @wb: wb index
554 *
555 * Free a wb slot allocated for use by the driver (all asics)
556 */
557void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
558{
559 if (wb < adev->wb.num_wb)
560 __clear_bit(wb, adev->wb.used);
561}
562
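/*
 * Illustrative sketch (not part of the original file): a ring allocates a
 * writeback slot, lets the GPU write status into it, reads the CPU copy,
 * and frees the slot again:
 *
 *	u32 wb;		// dword offset handed out by amdgpu_wb_get()
 *
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		u32 seq = le32_to_cpu(adev->wb.wb[wb]);	// CPU view
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */
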
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

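/*
 * Illustrative sketch (hypothetical numbers): with 256M of VRAM placed at
 * GPU address 0 by amdgpu_vram_location(), the GART ends up in the larger
 * hole above VRAM, giving an address map roughly like:
 *
 *	vram_start = 0x0000000000	vram_end = 0x000FFFFFFF
 *	gart_start = 0x0010000000	gart_end = gart_start + gart_size - 1
 */
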
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old smc fw still needs the driver to
		 * do vPost, otherwise the gpu hangs. smc fw versions 22.15 and
		 * above don't have this flaw, so force vPost for smc versions
		 * below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

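/*
 * Illustrative sketch (hypothetical values): loading the driver with, say,
 * "modprobe amdgpu vm_size=512 vm_block_size=30" trips the checks above:
 * vm_block_size is larger than 24, so it falls back to -1 (the per-ASIC
 * default) with a dev_warn, while vm_size=512 is accepted as a power of
 * two within the 1GB..1TB range.
 */
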
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

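/*
 * Illustrative sketch (hypothetical call): IP code uses the helpers above
 * to flip another block's clock or power state, e.g.:
 *
 *	amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *				     AMD_PG_STATE_GATE);
 */
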
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

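/*
 * Illustrative sketch (hypothetical check): callers gate features on a
 * minimum IP version, e.g.:
 *
 *	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0))
 *		; // GFX IP is at least v8.0
 */
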
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

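/*
 * Illustrative note (hypothetical values): the amdgpu.virtual_display
 * parameter parsed above is a semicolon-separated list of PCI addresses
 * (or "all"), each with an optional crtc count, e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:03:00.0,2;0000:04:00.0
 *
 * enables virtual display with two crtcs on the first device and the
 * default single crtc on the second.
 */
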
Alex Deuchere2a75f82017-04-27 16:58:01 -04001354static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1355{
Alex Deuchere2a75f82017-04-27 16:58:01 -04001356 const char *chip_name;
1357 char fw_name[30];
1358 int err;
1359 const struct gpu_info_firmware_header_v1_0 *hdr;
1360
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001361 adev->firmware.gpu_info_fw = NULL;
1362
Alex Deuchere2a75f82017-04-27 16:58:01 -04001363 switch (adev->asic_type) {
1364 case CHIP_TOPAZ:
1365 case CHIP_TONGA:
1366 case CHIP_FIJI:
1367 case CHIP_POLARIS11:
1368 case CHIP_POLARIS10:
1369 case CHIP_POLARIS12:
1370 case CHIP_CARRIZO:
1371 case CHIP_STONEY:
1372#ifdef CONFIG_DRM_AMDGPU_SI
1373 case CHIP_VERDE:
1374 case CHIP_TAHITI:
1375 case CHIP_PITCAIRN:
1376 case CHIP_OLAND:
1377 case CHIP_HAINAN:
1378#endif
1379#ifdef CONFIG_DRM_AMDGPU_CIK
1380 case CHIP_BONAIRE:
1381 case CHIP_HAWAII:
1382 case CHIP_KAVERI:
1383 case CHIP_KABINI:
1384 case CHIP_MULLINS:
1385#endif
1386 default:
1387 return 0;
1388 case CHIP_VEGA10:
1389 chip_name = "vega10";
1390 break;
Alex Deucher2d2e5e72017-05-09 12:27:35 -04001391 case CHIP_RAVEN:
1392 chip_name = "raven";
1393 break;
Alex Deuchere2a75f82017-04-27 16:58:01 -04001394 }
1395
1396 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001397 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001398 if (err) {
1399 dev_err(adev->dev,
1400 "Failed to load gpu_info firmware \"%s\"\n",
1401 fw_name);
1402 goto out;
1403 }
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001404 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001405 if (err) {
1406 dev_err(adev->dev,
1407 "Failed to validate gpu_info firmware \"%s\"\n",
1408 fw_name);
1409 goto out;
1410 }
1411
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001412 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
Alex Deuchere2a75f82017-04-27 16:58:01 -04001413 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1414
1415 switch (hdr->version_major) {
1416 case 1:
1417 {
1418 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
Huang Ruiab4fe3e2017-06-05 22:11:59 +08001419 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
Alex Deuchere2a75f82017-04-27 16:58:01 -04001420 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1421
Alex Deucherb5ab16b2017-05-11 19:09:49 -04001422 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1423 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1424 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1425 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001426 adev->gfx.config.max_texture_channel_caches =
Alex Deucherb5ab16b2017-05-11 19:09:49 -04001427 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1428 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1429 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1430 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1431 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001432 adev->gfx.config.double_offchip_lds_buf =
Alex Deucherb5ab16b2017-05-11 19:09:49 -04001433 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1434 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
Hawking Zhang51fd0372017-06-09 22:30:52 +08001435 adev->gfx.cu_info.max_waves_per_simd =
1436 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1437 adev->gfx.cu_info.max_scratch_slots_per_cu =
1438 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1439 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
Alex Deuchere2a75f82017-04-27 16:58:01 -04001440 break;
1441 }
1442 default:
1443 dev_err(adev->dev,
1444 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1445 err = -EINVAL;
1446 goto out;
1447 }
1448out:
Alex Deuchere2a75f82017-04-27 16:58:01 -04001449 return err;
1450}
1451
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

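/**
 * amdgpu_init - run sw/hw init for all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls sw_init on every valid IP block, runs GMC hw init early so GPU
 * memory can be allocated, then calls hw_init on the remaining blocks.
 * For SR-IOV the static CSA is allocated right after GMC hw init.
 * Returns 0 for success or an error on failure.
 */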
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

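/**
 * amdgpu_fill_reset_magic - snapshot the start of the gart table
 *
 * @adev: amdgpu_device pointer
 *
 * Saves the first AMDGPU_RESET_MAGIC_NUM bytes of the gart table so a
 * later comparison can tell whether VRAM contents survived a reset.
 */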
static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

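/**
 * amdgpu_check_vram_lost - check whether VRAM contents were lost
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the gart table no longer matches the saved reset
 * magic, i.e. VRAM contents were lost across the reset.
 */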
static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

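/**
 * amdgpu_late_set_cg_state - enable clockgating on all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Enables clockgating on every valid IP block except UVD and VCE, which
 * handle their own gating. Returns 0 for success or an error on failure.
 */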
static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

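/**
 * amdgpu_late_init - run late init for all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls late_init on each valid IP block, schedules the delayed
 * clockgating work, and records the reset magic.
 * Returns 0 for success or an error on failure.
 */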
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_fill_reset_magic(adev);

	return 0;
}

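/**
 * amdgpu_fini - tear down all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Disables the SMC first, then walks the IP blocks in reverse order
 * through hw_fini, sw_fini and late_fini. For SR-IOV the static CSA is
 * freed and full GPU access is released. Returns 0 for success.
 */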
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}

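/**
 * amdgpu_late_init_func_handler - delayed work for late clockgating
 *
 * @work: work struct embedded in the amdgpu_device
 *
 * Runs from the delayed work queued by amdgpu_late_init (after
 * AMDGPU_RESUME_MS) and enables clockgating on the IP blocks.
 */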
static void amdgpu_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_late_set_cg_state(adev);
}

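/**
 * amdgpu_suspend - suspend all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Ungates clocks (SMC first) so that the blocks can be shut down
 * cleanly, then calls suspend on every valid IP block in reverse order.
 * Returns 0 for success.
 */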
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down;
		 * the SMC was already ungated above */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

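/**
 * amdgpu_sriov_reinit_early - re-init early IP blocks after a VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs hw_init on the GMC, COMMON and IH blocks, in that order; used
 * by amdgpu_sriov_gpu_reset before the gart is recovered.
 */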
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

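/**
 * amdgpu_sriov_reinit_late - re-init remaining IP blocks after a VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs hw_init on the SMC, DCE, GFX, SDMA, UVD and VCE blocks, in
 * that order; used by amdgpu_sriov_gpu_reset once the gart has been
 * recovered.
 */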
static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

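/**
 * amdgpu_resume_phase1 - resume COMMON, GMC and IH first
 *
 * @adev: amdgpu_device pointer
 *
 * First phase of IP block resume: brings back the blocks that the rest
 * of the hardware depends on. Returns 0 for success or an error on
 * failure.
 */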
static int amdgpu_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

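/**
 * amdgpu_resume_phase2 - resume the remaining IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Second phase of IP block resume: everything except COMMON, GMC and
 * IH, which were handled in phase 1. Returns 0 for success or an error
 * on failure.
 */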
static int amdgpu_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

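/**
 * amdgpu_resume - resume all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Runs both resume phases back to back. Returns 0 for success or an
 * error on failure.
 */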
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_resume_phase2(adev);

	return r;
}

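/**
 * amdgpu_device_detect_sriov_bios - detect an SR-IOV capable vBIOS
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the atom firmware or atombios tables for virtualization
 * support and sets AMDGPU_SRIOV_CAPS_SRIOV_VBIOS accordingly.
 */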
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can call these functions without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_test_ib_ring_init(adev);
	if (r)
		DRM_ERROR("registering test ib ring debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: put the PCI device into a low power state when true
 * @fbcon: also suspend the fbdev console
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: re-enable the PCI device when true
 * @fbcon: also resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}
	amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

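/**
 * amdgpu_check_soft_reset - check all IP blocks for hangs
 *
 * @adev: amdgpu_device pointer
 *
 * Asks every valid IP block that implements check_soft_reset whether it
 * is hung and records the result in its status. Returns true if any
 * block reports a hang.
 */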
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

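/**
 * amdgpu_pre_soft_reset - prepare hung IP blocks for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Calls pre_soft_reset on every valid IP block that reported a hang.
 * Returns 0 for success or an error on failure.
 */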
static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

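/**
 * amdgpu_need_full_reset - check whether a full ASIC reset is required
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if one of the blocks that cannot be soft-reset in
 * isolation (GMC, SMC, ACP or DCE) is hung.
 */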
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some block needs full reset!\n");
				return true;
			}
		}
	}
	return false;
}

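/**
 * amdgpu_soft_reset - soft-reset all hung IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls soft_reset on every valid IP block that reported a hang.
 * Returns 0 for success or an error on failure.
 */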
static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

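/**
 * amdgpu_post_soft_reset - clean up after a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Calls post_soft_reset on every valid IP block that reported a hang.
 * Returns 0 for success or an error on failure.
 */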
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

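/**
 * amdgpu_need_backup - check whether shadow BOs should be maintained
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true on dGPUs when a lockup timeout is set; callers use this
 * to decide whether VRAM buffers get GTT shadows for recovery after a
 * GPU reset.
 */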
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0 ? true : false;
}

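/**
 * amdgpu_recover_vram_from_shadow - restore one VRAM BO from its shadow
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the copy on
 * @bo: buffer object to restore
 * @fence: returned fence for the scheduled copy, if any
 *
 * If @bo has a shadow and still lives in VRAM, validates the shadow and
 * schedules a copy back into VRAM. Returns 0 for success or an error on
 * failure.
 */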
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @job: the job that triggered the hang, or NULL
 *
 * Attempts to reset the GPU if it has hung (all asics),
 * for the SR-IOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
{
	int i, j, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* we start from the ring that triggered the GPU hang */
	j = job ? job->ring->idx : 0;

	/* block scheduler */
	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);

		if (job && j != i)
			continue;

		/* give one last chance to check whether the job was removed
		 * from the mirror list, since we already spent some time in
		 * kthread_park */
		if (job && list_empty(&job->base.node)) {
			kthread_unpark(ring->sched.thread);
			goto give_up_reset;
		}

		if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
			amd_sched_job_kickout(&job->base);

		/* only do job_reset on the hang ring if @job not NULL */
		amd_sched_hw_job_reset(&ring->sched);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion_ring(ring);
	}

	/* request to take full control of GPU before re-initialization */
	if (job)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover the gart before running SMC/CP/SDMA resume */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	if (amdgpu_ib_ring_tests(adev))
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		if (job && j != i) {
			kthread_unpark(ring->sched.thread);
			continue;
		}

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
give_up_reset:
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset, vram_lost = false;

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume_phase1(adev);
			if (r)
				goto out;
			vram_lost = amdgpu_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				goto out;
			r = amdgpu_resume_phase2(adev);
			if (r)
				goto out;
			if (vram_lost)
				amdgpu_fill_reset_magic(adev);
		}
	}
out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/*
		 * Recover VM page tables, since we cannot rely on VRAM being
		 * consistent after a full GPU reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i] && adev->rings[i]->sched.thread) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	amdgpu_vf_error_trans_all(adev);
	return r;
}

void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report this so we can increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
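
/*
 * Illustrative userspace sketch (not part of the driver): composing the file
 * offset for a banked read through the amdgpu_regs debugfs entry, per the
 * decoding above. Bit 62 enables bank selection, bits 24/34/44 carry the
 * SE/SH/instance banks (0x3FF means broadcast), bit 23 requests the PG lock,
 * and the low 22 bits hold the MMIO byte offset. The debugfs path and the
 * register offset below are assumptions for demonstration only.
 */
#if 0	/* example only, never compiled into the driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint64_t amdgpu_regs_pos(uint64_t se, uint64_t sh, uint64_t inst,
				uint64_t byte_off)
{
	return (1ULL << 62) |		/* enable bank selection */
	       (se << 24) | (sh << 34) | (inst << 44) |
	       (byte_off & ((1ULL << 22) - 1));
}

int main(void)
{
	/* hypothetical DRI minor; adjust for the target system */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	uint32_t value;

	if (fd < 0)
		return 1;
	/* read one register from SE0/SH0, broadcast instance (0x3FF) */
	if (pread(fd, &value, sizeof(value),
		  amdgpu_regs_pos(0, 0, 0x3FF, 0x1234)) == sizeof(value))
		printf("reg 0x1234 = 0x%08x\n", value);
	close(fd);
	return 0;
}
#endif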

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
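
/*
 * Illustrative userspace sketch (not part of the driver): the amdgpu_gca_config
 * entry above exports a versioned array of u32 values, with config[0] holding
 * the layout version and config[1] max_shader_engines in every revision so
 * far. The debugfs path is an assumption for demonstration only.
 */
#if 0	/* example only, never compiled into the driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
	uint32_t config[64];
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, config, sizeof(config));
	if (n >= 2 * (ssize_t)sizeof(uint32_t))
		printf("layout version %u, max SEs %u\n", config[0], config[1]);
	close(fd);
	return 0;
}
#endif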

static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
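
/*
 * Illustrative userspace sketch (not part of the driver): sensors are
 * addressed by index through the file offset (*pos >> 2 above) and return
 * 32-bit words. Indices follow the power-play sensor enumeration; the index
 * used below is a placeholder assumption, not a specific sensor, and the
 * debugfs path is likewise assumed.
 */
#if 0	/* example only, never compiled into the driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);
	uint32_t value;
	int idx = 0;	/* hypothetical sensor index */

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), (off_t)idx << 2) == sizeof(value))
		printf("sensor %d = %u\n", idx, value);
	close(fd);
	return 0;
}
#endif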

static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
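
/*
 * Illustrative userspace sketch (not part of the driver): composing the file
 * offset for amdgpu_wave per the decoding above -- byte offset in bits 0-6,
 * SE at bit 7, SH at bit 15, CU at bit 23, wave at bit 31 and SIMD at bit 37.
 * All selector values and the debugfs path are placeholder assumptions.
 */
#if 0	/* example only, never compiled into the driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_wave", O_RDONLY);
	uint64_t pos = (0ULL)		/* start at dword 0 of the status block */
		     | (0ULL << 7)	/* se */
		     | (0ULL << 15)	/* sh */
		     | (0ULL << 23)	/* cu */
		     | (0ULL << 31)	/* wave */
		     | (0ULL << 37);	/* simd */
	uint32_t data[32];
	ssize_t n;

	if (fd < 0)
		return 1;
	n = pread(fd, data, sizeof(data), pos);
	if (n > 0)
		printf("read %zd bytes of wave status\n", n);
	close(fd);
	return 0;
}
#endif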

static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
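
/*
 * Illustrative userspace sketch (not part of the driver): the amdgpu_gpr
 * offset packs a dword index in bits 0-11 and the SE/SH/CU/wave/SIMD/thread
 * selectors at bits 12/20/28/36/44/52, with bit 60 choosing SGPRs (1) over
 * VGPRs (0). Selector values and the debugfs path are placeholder assumptions.
 */
#if 0	/* example only, never compiled into the driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gpr", O_RDONLY);
	uint64_t pos = (1ULL << 60);	/* bank 1: SGPRs, starting at dword 0 */
	uint32_t sgprs[16];
	ssize_t n;

	if (fd < 0)
		return 1;
	n = pread(fd, sgprs, sizeof(sgprs), pos);
	if (n >= (ssize_t)sizeof(uint32_t))
		printf("sgpr0 = 0x%08x\n", sgprs[0]);
	close(fd);
	return 0;
}
#endif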

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* park the scheduler threads while the test runs */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_puts(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_puts(m, "ib ring tests passed.\n");

	/* restart the scheduler threads */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
};

static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_debugfs_test_ib_ring_list, 1);
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif