/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
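
/*
 * Note: registers whose byte offset (reg * 4) lies inside the mapped MMIO
 * window are accessed directly; anything beyond it goes through the
 * mmMM_INDEX/mmMM_DATA indirect pair under mmio_idx_lock.  Callers
 * normally use the RREG32()/WREG32() macros, which wrap these helpers.
 */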

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
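
/*
 * Note: the 64-bit doorbell helpers go through atomic64 accessors so the
 * two dwords reach the doorbell aperture as a single 64-bit transaction;
 * presumably a split write could let the hardware sample a half-updated
 * ring pointer.
 */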

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
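
/*
 * Illustrative golden-register array (mmSOME_REG is a placeholder, not a
 * real register): each triple is { offset, AND mask, OR value }, so
 *
 *	static const u32 golden_settings[] = {
 *		mmSOME_REG, 0xffffff00, 0x00000024,
 *	};
 *
 * clears the bits covered by the AND mask and ORs in 0x24, while an AND
 * mask of 0xffffffff writes the OR value outright.
 */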

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a 64-bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64-bit wb slot (two consecutive 32-bit slots) for use by
 * the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);
	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}
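
/*
 * Note: bitmap_find_next_zero_area_off() is asked above for nr = 2
 * consecutive free slots with align_mask = 7, i.e. the pair starts on an
 * 8-slot (32-byte) boundary, which keeps the 64-bit writeback entry
 * naturally aligned.
 */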

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a 64-bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a 64-bit wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 along with lots of
 * Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
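
/*
 * Example: with base = 0 and an 8 GB mc_vram_size, this yields
 * vram_start = 0x0 and vram_end = 0x1FFFFFFFF; had mc_vram_size exceeded
 * the addressable range, it would have been clamped to aper_size first.
 */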

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;

}
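
/*
 * The MEM_SIZE heuristic above works because vbios posting programs the
 * memory controller: on an unposted pre-CIK board the config-memsize
 * register still reads 0 (or 0xffffffff if the read fails entirely), so
 * any other value means the asic has already been initialized.
 */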

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
		 * hangs.  smc fw versions 22.15 and above don't have this flaw, so we
		 * force vPost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}
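
/*
 * Example: booting with amdgpu.vm_size=64 requests a 64 GB per-process
 * GPU address space; it is a power of two within [1 GB, 1 TB], so it
 * passes all three checks above, while anything else falls back to the
 * -1 default.
 */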

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = 32;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
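
/*
 * Usage sketch: a caller can gate a feature on an IP version, e.g.
 * amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) returns
 * 0 when the GFX IP block is version 8.0 or newer.
 */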

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
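
/*
 * The module parameter parsed above is a semicolon-separated list of
 * <pci address>,<crtc count> entries; e.g.
 * amdgpu.virtual_display=0000:01:00.0,2 enables two virtual CRTCs on
 * that device, and the special address "all" matches every GPU.
 */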

static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
1520
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

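/*
 * amdgpu_init - run sw_init and hw_init for all valid IP blocks
 *
 * First pass: sw_init everything, bringing the GMC hardware up early
 * (plus VRAM scratch, writeback and, for SR-IOV, the static CSA) so
 * GPU memory can be allocated.  Second pass: hw_init the remaining
 * blocks in order.
 */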
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

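/*
 * Reset magic: amdgpu_fill_reset_magic() snapshots the first
 * AMDGPU_RESET_MAGIC_NUM bytes of the GART table at late-init time;
 * amdgpu_check_vram_lost() compares that snapshot after a GPU reset.
 * Since the GART table is backed by VRAM here, a mismatch is taken to
 * mean VRAM contents did not survive the reset.
 */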
static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

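/*
 * amdgpu_late_set_cg_state - enable clockgating on all valid IP blocks.
 * UVD and VCE are skipped since their gating is handled separately;
 * this runs from the delayed late_init_work so gating only happens once
 * late init (and the IB tests before it) has finished.
 */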
static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

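/*
 * amdgpu_late_init - run late_init for all valid IP blocks, then queue
 * the delayed clockgating work (AMDGPU_RESUME_MS from now) and record
 * the VRAM reset magic used later by amdgpu_check_vram_lost().
 */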
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_fill_reset_magic(adev);

	return 0;
}

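/*
 * amdgpu_fini - tear down all IP blocks
 *
 * SMC is ungated and shut down first; the remaining blocks are then
 * hw_fini'ed, sw_fini'ed and late_fini'ed in reverse init order, with
 * writeback/VRAM scratch freed alongside GMC.  An SR-IOV VF also frees
 * its CSA and releases full GPU access at the end.
 */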
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}

static void amdgpu_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_late_set_cg_state(adev);
}

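/*
 * amdgpu_suspend - suspend all valid IP blocks in reverse order
 *
 * Every block is ungated first (SMC explicitly, then the rest) so its
 * suspend hook sees fully clocked hardware.  SR-IOV VFs bracket the
 * sequence with request/release of full GPU access.
 */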
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

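/*
 * SR-IOV re-init after a VF reset happens in two passes over fixed
 * ip_order[] tables: amdgpu_sriov_reinit_early() brings up GMC, COMMON
 * and IH (everything needed before the GART can be recovered), and
 * amdgpu_sriov_reinit_late() then re-inits SMC, DCE, GFX, SDMA and VCE.
 * Both walk adev->ip_blocks and call hw_init on each matching valid block.
 */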
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_VCE,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

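/*
 * Resume is split in two phases: phase1 restores COMMON, GMC and IH so
 * that memory access and interrupts work again, phase2 resumes every
 * other valid IP block.  amdgpu_resume() simply chains the two.
 */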
static int amdgpu_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_resume_phase2(adev);

	return r;
}

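/*
 * amdgpu_device_detect_sriov_bios - flag an SR-IOV capable vBIOS
 *
 * Checks the ATOM firmware (or legacy atombios) virtualization table and
 * sets AMDGPU_SRIOV_CAPS_SRIOV_VBIOS in adev->virt.caps accordingly.
 */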
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can call these functions without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_test_ib_ring_init(adev);
	if (r)
		DRM_ERROR("registering ib ring test debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: console suspend state
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: console resume state
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}
	amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

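/*
 * Soft-reset plumbing: amdgpu_check_soft_reset() polls every valid IP
 * block's check_soft_reset hook and latches the per-block hang status;
 * the pre_soft_reset/soft_reset/post_soft_reset helpers below then act
 * only on the blocks that reported a hang.
 */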
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

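/*
 * amdgpu_need_full_reset - check whether a soft reset is enough
 *
 * A hang flagged in GMC, SMC, ACP or DCE cannot be recovered by the
 * per-block soft reset path, so any of those hanging means the whole
 * ASIC has to be reset.
 */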
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0 ? true : false;
}

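/*
 * amdgpu_recover_vram_from_shadow - restore one VRAM BO from its shadow
 *
 * Skips BOs without a shadow or that were evicted out of VRAM anyway;
 * otherwise validates and binds the shadow, then schedules a copy back
 * into VRAM on @ring, returning the completion fence through @fence.
 */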
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
		if (r) {
			DRM_ERROR("%p bind failed\n", bo->shadow);
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

Alex Deucherd38ceaf2015-04-20 16:55:21 -04002711/**
Monk Liua90ad3c2017-01-23 14:22:08 +08002712 * amdgpu_sriov_gpu_reset - reset the asic
2713 *
2714 * @adev: amdgpu device pointer
Monk Liu7225f872017-04-26 14:51:54 +08002715 * @job: which job trigger hang
Monk Liua90ad3c2017-01-23 14:22:08 +08002716 *
2717 * Attempt the reset the GPU if it has hung (all asics).
2718 * for SRIOV case.
2719 * Returns 0 for success or an error on failure.
2720 */
Monk Liu7225f872017-04-26 14:51:54 +08002721int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002722{
Monk Liu65781c72017-05-11 13:36:44 +08002723 int i, j, r = 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002724 int resched;
2725 struct amdgpu_bo *bo, *tmp;
2726 struct amdgpu_ring *ring;
2727 struct dma_fence *fence = NULL, *next = NULL;
2728
Monk Liu147b5982017-01-25 15:48:01 +08002729 mutex_lock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002730 atomic_inc(&adev->gpu_reset_counter);
Monk Liu1fb37a32017-01-26 15:36:37 +08002731 adev->gfx.in_reset = true;
Monk Liua90ad3c2017-01-23 14:22:08 +08002732
2733 /* block TTM */
2734 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2735
Monk Liu65781c72017-05-11 13:36:44 +08002736 /* we start from the ring trigger GPU hang */
2737 j = job ? job->ring->idx : 0;
Monk Liua90ad3c2017-01-23 14:22:08 +08002738
Monk Liu65781c72017-05-11 13:36:44 +08002739 /* block scheduler */
2740 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2741 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002742 if (!ring || !ring->sched.thread)
2743 continue;
2744
2745 kthread_park(ring->sched.thread);
Monk Liua90ad3c2017-01-23 14:22:08 +08002746
Monk Liu65781c72017-05-11 13:36:44 +08002747 if (job && j != i)
2748 continue;
2749
Monk Liu4f059ec2017-05-11 13:59:15 +08002750 /* here give the last chance to check if job removed from mirror-list
Monk Liu65781c72017-05-11 13:36:44 +08002751 * since we already pay some time on kthread_park */
Monk Liu4f059ec2017-05-11 13:59:15 +08002752 if (job && list_empty(&job->base.node)) {
Monk Liu65781c72017-05-11 13:36:44 +08002753 kthread_unpark(ring->sched.thread);
2754 goto give_up_reset;
2755 }
2756
2757 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2758 amd_sched_job_kickout(&job->base);
2759
2760 /* only do job_reset on the hang ring if @job not NULL */
2761 amd_sched_hw_job_reset(&ring->sched);
2762
2763 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2764 amdgpu_fence_driver_force_completion_ring(ring);
2765 }
Monk Liua90ad3c2017-01-23 14:22:08 +08002766
2767 /* request to take full control of GPU before re-initialization */
Monk Liu7225f872017-04-26 14:51:54 +08002768 if (job)
Monk Liua90ad3c2017-01-23 14:22:08 +08002769 amdgpu_virt_reset_gpu(adev);
2770 else
2771 amdgpu_virt_request_full_gpu(adev, true);
2772
2773
2774 /* Resume IP prior to SMC */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002775 amdgpu_sriov_reinit_early(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002776
2777 /* we need recover gart prior to run SMC/CP/SDMA resume */
2778 amdgpu_ttm_recover_gart(adev);
2779
2780 /* now we are okay to resume SMC/CP/SDMA */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002781 amdgpu_sriov_reinit_late(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002782
2783 amdgpu_irq_gpu_reset_resume_helper(adev);
2784
2785 if (amdgpu_ib_ring_tests(adev))
2786 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2787
2788 /* release full control of GPU after ib test */
2789 amdgpu_virt_release_full_gpu(adev, true);
2790
2791 DRM_INFO("recover vram bo from shadow\n");
2792
2793 ring = adev->mman.buffer_funcs_ring;
2794 mutex_lock(&adev->shadow_list_lock);
2795 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002796 next = NULL;
Monk Liua90ad3c2017-01-23 14:22:08 +08002797 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2798 if (fence) {
2799 r = dma_fence_wait(fence, false);
2800 if (r) {
2801 WARN(r, "recovery from shadow isn't completed\n");
2802 break;
2803 }
2804 }
2805
2806 dma_fence_put(fence);
2807 fence = next;
2808 }
2809 mutex_unlock(&adev->shadow_list_lock);
2810
2811 if (fence) {
2812 r = dma_fence_wait(fence, false);
2813 if (r)
2814 WARN(r, "recovery from shadow isn't completed\n");
2815 }
2816 dma_fence_put(fence);
2817
Monk Liu65781c72017-05-11 13:36:44 +08002818 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2819 ring = adev->rings[i % AMDGPU_MAX_RINGS];
Monk Liua90ad3c2017-01-23 14:22:08 +08002820 if (!ring || !ring->sched.thread)
2821 continue;
2822
Monk Liu65781c72017-05-11 13:36:44 +08002823 if (job && j != i) {
2824 kthread_unpark(ring->sched.thread);
2825 continue;
2826 }
2827
Monk Liua90ad3c2017-01-23 14:22:08 +08002828 amd_sched_job_recovery(&ring->sched);
2829 kthread_unpark(ring->sched.thread);
2830 }
2831
2832 drm_helper_resume_force_mode(adev->ddev);
Monk Liu65781c72017-05-11 13:36:44 +08002833give_up_reset:
Monk Liua90ad3c2017-01-23 14:22:08 +08002834 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2835 if (r) {
2836 /* bad news, how do we tell userspace? */
2837 dev_info(adev->dev, "GPU reset failed\n");
Monk Liu65781c72017-05-11 13:36:44 +08002838 } else {
2839 dev_info(adev->dev, "GPU reset succeeded!\n");
Monk Liua90ad3c2017-01-23 14:22:08 +08002840 }
2841
Monk Liu1fb37a32017-01-26 15:36:37 +08002842 adev->gfx.in_reset = false;
Monk Liu147b5982017-01-25 15:48:01 +08002843 mutex_unlock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002844 return r;
2845}
2846
2847/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002848 * amdgpu_gpu_reset - reset the asic
2849 *
2850 * @adev: amdgpu device pointer
2851 *
2852 * Attempt to reset the GPU if it has hung (all asics).
2853 * Returns 0 for success or an error on failure.
2854 */
2855int amdgpu_gpu_reset(struct amdgpu_device *adev)
2856{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002857 int i, r;
2858 int resched;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002859 bool need_full_reset, vram_lost = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002860
Chunming Zhou63fbf422016-07-15 11:19:20 +08002861 if (!amdgpu_check_soft_reset(adev)) {
2862 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2863 return 0;
2864 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002865
Marek Olšákd94aed52015-05-05 21:13:49 +02002866 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002867
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002868 /* block TTM */
2869 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2870
Chunming Zhou0875dc92016-06-12 15:41:58 +08002871 /* block scheduler */
2872 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2873 struct amdgpu_ring *ring = adev->rings[i];
2874
Chunming Zhou51687752017-04-24 17:09:15 +08002875 if (!ring || !ring->sched.thread)
Chunming Zhou0875dc92016-06-12 15:41:58 +08002876 continue;
2877 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002878 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002879 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002880 /* after all hw jobs are reset, the hw fence is meaningless, so force completion */
2881 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002882
Chunming Zhou35d782f2016-07-15 15:57:13 +08002883 need_full_reset = amdgpu_need_full_reset(adev);
2884
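	/* try the lighter per-IP soft reset first; fall back to a full ASIC
	 * reset (suspend, reset, re-post, resume) if it fails or the hang
	 * persists */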
2885 if (!need_full_reset) {
2886 amdgpu_pre_soft_reset(adev);
2887 r = amdgpu_soft_reset(adev);
2888 amdgpu_post_soft_reset(adev);
2889 if (r || amdgpu_check_soft_reset(adev)) {
2890 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2891 need_full_reset = true;
2892 }
2893 }
2894
2895 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002896 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002897
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002898retry:
Alex Deucherd05da0e2017-06-30 17:08:45 -04002899 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002900 r = amdgpu_asic_reset(adev);
Alex Deucherd05da0e2017-06-30 17:08:45 -04002901 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002902 /* post card */
2903 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002904
Chunming Zhou35d782f2016-07-15 15:57:13 +08002905 if (!r) {
2906 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Chunming Zhoufcf06492017-05-05 10:33:33 +08002907 r = amdgpu_resume_phase1(adev);
2908 if (r)
2909 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002910 vram_lost = amdgpu_check_vram_lost(adev);
Chunming Zhouf1892132017-05-15 16:48:27 +08002911 if (vram_lost) {
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002912 DRM_ERROR("VRAM is lost!\n");
Chunming Zhouf1892132017-05-15 16:48:27 +08002913 atomic_inc(&adev->vram_lost_counter);
2914 }
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002915 r = amdgpu_ttm_recover_gart(adev);
2916 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002917 goto out;
2918 r = amdgpu_resume_phase2(adev);
2919 if (r)
2920 goto out;
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08002921 if (vram_lost)
2922 amdgpu_fill_reset_magic(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002923 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08002924 }
2925out:
2926 if (!r) {
2927 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002928 r = amdgpu_ib_ring_tests(adev);
2929 if (r) {
2930 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002931 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002932 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002933 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002934 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002935 /*
2936 * recover VM page tables, since we cannot rely on VRAM being
2937 * consistent after a full GPU reset.
2938 */
2939 if (need_full_reset && amdgpu_need_backup(adev)) {
2940 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2941 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002942 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002943
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002944 DRM_INFO("recover vram bo from shadow\n");
2945 mutex_lock(&adev->shadow_list_lock);
2946 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002947 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002948 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2949 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002950 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002951 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08002952 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002953 break;
2954 }
2955 }
2956
Chris Wilsonf54d1862016-10-25 13:00:45 +01002957 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002958 fence = next;
2959 }
2960 mutex_unlock(&adev->shadow_list_lock);
2961 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002962 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002963 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08002964 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002965 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002966 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002967 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002968 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2969 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08002970
2971 if (!ring || !ring->sched.thread)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002972 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002973
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002974 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002975 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002976 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002977 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002978 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Gavin Wan89041942017-06-23 13:55:15 -04002979 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002980 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou51687752017-04-24 17:09:15 +08002981 if (adev->rings[i] && adev->rings[i]->sched.thread) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002982 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002983 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002984 }
2985 }
2986
2987 drm_helper_resume_force_mode(adev->ddev);
2988
2989 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
Gavin Wan89041942017-06-23 13:55:15 -04002990 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002991 /* bad news, how do we tell userspace? */
2992 dev_info(adev->dev, "GPU reset failed\n");
Gavin Wan89041942017-06-23 13:55:15 -04002993 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
2994 } else {
Chunming Zhou6643be62017-05-05 10:50:09 +08002996 dev_info(adev->dev, "GPU reset succeeded!\n");
Gavin Wan89041942017-06-23 13:55:15 -04002997 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002998
Gavin Wan89041942017-06-23 13:55:15 -04002999 amdgpu_vf_error_trans_all(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003000 return r;
3001}
3002
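/**
 * amdgpu_get_pcie_info - set up the PCIe gen and lane-width masks
 *
 * @adev: amdgpu device pointer
 *
 * Initializes adev->pm.pcie_gen_mask and adev->pm.pcie_mlw_mask from the
 * amdgpu_pcie_gen_cap/amdgpu_pcie_lane_cap module parameters when set,
 * otherwise from the link capabilities reported by the DRM core, falling
 * back to conservative defaults for devices on a root bus (such as APUs).
 */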
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003003void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3004{
3005 u32 mask;
3006 int ret;
3007
Alex Deuchercd474ba2016-02-04 10:21:23 -05003008 if (amdgpu_pcie_gen_cap)
3009 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3010
3011 if (amdgpu_pcie_lane_cap)
3012 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3013
3014 /* covers APUs as well */
3015 if (pci_is_root_bus(adev->pdev->bus)) {
3016 if (adev->pm.pcie_gen_mask == 0)
3017 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3018 if (adev->pm.pcie_mlw_mask == 0)
3019 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003020 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003021 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05003022
3023 if (adev->pm.pcie_gen_mask == 0) {
3024 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3025 if (!ret) {
3026 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3027 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3028 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3029
3030 if (mask & DRM_PCIE_SPEED_25)
3031 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3032 if (mask & DRM_PCIE_SPEED_50)
3033 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3034 if (mask & DRM_PCIE_SPEED_80)
3035 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3036 } else {
3037 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3038 }
3039 }
3040 if (adev->pm.pcie_mlw_mask == 0) {
3041 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3042 if (!ret) {
3043 switch (mask) {
3044 case 32:
3045 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3047 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3048 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3049 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3050 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3051 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3052 break;
3053 case 16:
3054 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3055 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3056 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3057 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3058 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3059 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3060 break;
3061 case 12:
3062 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3063 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3064 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3065 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3066 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3067 break;
3068 case 8:
3069 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3070 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3071 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3072 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3073 break;
3074 case 4:
3075 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3076 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3077 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3078 break;
3079 case 2:
3080 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3081 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3082 break;
3083 case 1:
3084 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3085 break;
3086 default:
3087 break;
3088 }
3089 } else {
3090 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003091 }
3092 }
3093}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003094
3095/*
3096 * Debugfs
3097 */
3098int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04003099 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003100 unsigned nfiles)
3101{
3102 unsigned i;
3103
3104 for (i = 0; i < adev->debugfs_count; i++) {
3105 if (adev->debugfs[i].files == files) {
3106 /* Already registered */
3107 return 0;
3108 }
3109 }
3110
3111 i = adev->debugfs_count + 1;
3112 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3113 DRM_ERROR("Reached maximum number of debugfs components.\n");
3114 DRM_ERROR("Report so we increase "
3115 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3116 return -EINVAL;
3117 }
3118 adev->debugfs[adev->debugfs_count].files = files;
3119 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3120 adev->debugfs_count = i;
3121#if defined(CONFIG_DEBUG_FS)
3122 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003123 adev->ddev->primary->debugfs_root,
3124 adev->ddev->primary);
3125#endif
3126 return 0;
3127}
3128
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003129#if defined(CONFIG_DEBUG_FS)
3130
3131static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3132 size_t size, loff_t *pos)
3133{
Al Viro45063092016-12-04 18:24:56 -05003134 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003135 ssize_t result = 0;
3136 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04003137 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04003138 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003139
3140 if (size & 0x3 || *pos & 0x3)
3141 return -EINVAL;
3142
Tom St Denisbd122672016-07-28 09:39:22 -04003143 /* are we reading registers for which a PG lock is necessary? */
3144 pm_pg_lock = (*pos >> 23) & 1;
3145
Tom St Denis566281592016-06-27 11:55:07 -04003146 if (*pos & (1ULL << 62)) {
3147 se_bank = (*pos >> 24) & 0x3FF;
3148 sh_bank = (*pos >> 34) & 0x3FF;
3149 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04003150
3151 if (se_bank == 0x3FF)
3152 se_bank = 0xFFFFFFFF;
3153 if (sh_bank == 0x3FF)
3154 sh_bank = 0xFFFFFFFF;
3155 if (instance_bank == 0x3FF)
3156 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04003157 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04003158 } else {
3159 use_bank = 0;
3160 }
3161
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003162 *pos &= (1UL << 22) - 1;
Tom St Denisbd122672016-07-28 09:39:22 -04003163
Tom St Denis566281592016-06-27 11:55:07 -04003164 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04003165 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3166 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04003167 return -EINVAL;
3168 mutex_lock(&adev->grbm_idx_mutex);
3169 amdgpu_gfx_select_se_sh(adev, se_bank,
3170 sh_bank, instance_bank);
3171 }
3172
Tom St Denisbd122672016-07-28 09:39:22 -04003173 if (pm_pg_lock)
3174 mutex_lock(&adev->pm.mutex);
3175
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003176 while (size) {
3177 uint32_t value;
3178
3179 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04003180 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003181
3182 value = RREG32(*pos >> 2);
3183 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04003184 if (r) {
3185 result = r;
3186 goto end;
3187 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003188
3189 result += 4;
3190 buf += 4;
3191 *pos += 4;
3192 size -= 4;
3193 }
3194
Tom St Denis566281592016-06-27 11:55:07 -04003195end:
3196 if (use_bank) {
3197 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3198 mutex_unlock(&adev->grbm_idx_mutex);
3199 }
3200
Tom St Denisbd122672016-07-28 09:39:22 -04003201 if (pm_pg_lock)
3202 mutex_unlock(&adev->pm.mutex);
3203
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003204 return result;
3205}
3206
3207static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3208 size_t size, loff_t *pos)
3209{
Al Viro45063092016-12-04 18:24:56 -05003210 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003211 ssize_t result = 0;
3212 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04003213 bool pm_pg_lock, use_bank;
3214 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003215
3216 if (size & 0x3 || *pos & 0x3)
3217 return -EINVAL;
3218
Tom St Denis394fdde2016-10-10 07:31:23 -04003219 /* are we writing registers for which a PG lock is necessary? */
3220 pm_pg_lock = (*pos >> 23) & 1;
3221
3222 if (*pos & (1ULL << 62)) {
3223 se_bank = (*pos >> 24) & 0x3FF;
3224 sh_bank = (*pos >> 34) & 0x3FF;
3225 instance_bank = (*pos >> 44) & 0x3FF;
3226
3227 if (se_bank == 0x3FF)
3228 se_bank = 0xFFFFFFFF;
3229 if (sh_bank == 0x3FF)
3230 sh_bank = 0xFFFFFFFF;
3231 if (instance_bank == 0x3FF)
3232 instance_bank = 0xFFFFFFFF;
3233 use_bank = 1;
3234 } else {
3235 use_bank = 0;
3236 }
3237
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003238 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04003239
3240 if (use_bank) {
3241 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3242 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3243 return -EINVAL;
3244 mutex_lock(&adev->grbm_idx_mutex);
3245 amdgpu_gfx_select_se_sh(adev, se_bank,
3246 sh_bank, instance_bank);
3247 }
3248
3249 if (pm_pg_lock)
3250 mutex_lock(&adev->pm.mutex);
3251
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003252 while (size) {
3253 uint32_t value;
3254
3255 if (*pos > adev->rmmio_size)
3256 return result;
3257
3258 r = get_user(value, (uint32_t *)buf);
3259 if (r)
3260 return r;
3261
3262 WREG32(*pos >> 2, value);
3263
3264 result += 4;
3265 buf += 4;
3266 *pos += 4;
3267 size -= 4;
3268 }
3269
Tom St Denis394fdde2016-10-10 07:31:23 -04003270 if (use_bank) {
3271 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3272 mutex_unlock(&adev->grbm_idx_mutex);
3273 }
3274
3275 if (pm_pg_lock)
3276 mutex_unlock(&adev->pm.mutex);
3277
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003278 return result;
3279}
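
/*
 * Layout of the file offset consumed by amdgpu_debugfs_regs_read() and
 * amdgpu_debugfs_regs_write(), as decoded above:
 *
 *   bits  0..21  MMIO register offset, in bytes
 *   bit      23  take the PM lock around the access
 *   bits 24..33  SE bank       (0x3FF selects broadcast)
 *   bits 34..43  SH bank       (0x3FF selects broadcast)
 *   bits 44..53  instance bank (0x3FF selects broadcast)
 *   bit      62  enable banked (per-SE/SH/instance) access
 *
 * A minimal userspace sketch for composing such an offset; the helper is
 * hypothetical and not part of any UAPI header:
 *
 *   static off_t amdgpu_regs_off(uint32_t reg, unsigned se, unsigned sh,
 *                                unsigned instance)
 *   {
 *       return (reg & ((1u << 22) - 1)) |
 *              ((off_t)(se & 0x3FF) << 24) |
 *              ((off_t)(sh & 0x3FF) << 34) |
 *              ((off_t)(instance & 0x3FF) << 44) |
 *              ((off_t)1 << 62);
 *   }
 *
 * followed by pread()/pwrite() of 4-byte-aligned, 4-byte-multiple sizes
 * on the amdgpu_regs debugfs file.
 */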
3280
Tom St Denisadcec282016-04-15 13:08:44 -04003281static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3282 size_t size, loff_t *pos)
3283{
Al Viro45063092016-12-04 18:24:56 -05003284 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003285 ssize_t result = 0;
3286 int r;
3287
3288 if (size & 0x3 || *pos & 0x3)
3289 return -EINVAL;
3290
3291 while (size) {
3292 uint32_t value;
3293
3294 value = RREG32_PCIE(*pos >> 2);
3295 r = put_user(value, (uint32_t *)buf);
3296 if (r)
3297 return r;
3298
3299 result += 4;
3300 buf += 4;
3301 *pos += 4;
3302 size -= 4;
3303 }
3304
3305 return result;
3306}
3307
3308static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3309 size_t size, loff_t *pos)
3310{
Al Viro45063092016-12-04 18:24:56 -05003311 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003312 ssize_t result = 0;
3313 int r;
3314
3315 if (size & 0x3 || *pos & 0x3)
3316 return -EINVAL;
3317
3318 while (size) {
3319 uint32_t value;
3320
3321 r = get_user(value, (uint32_t *)buf);
3322 if (r)
3323 return r;
3324
3325 WREG32_PCIE(*pos >> 2, value);
3326
3327 result += 4;
3328 buf += 4;
3329 *pos += 4;
3330 size -= 4;
3331 }
3332
3333 return result;
3334}
3335
3336static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3337 size_t size, loff_t *pos)
3338{
Al Viro45063092016-12-04 18:24:56 -05003339 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003340 ssize_t result = 0;
3341 int r;
3342
3343 if (size & 0x3 || *pos & 0x3)
3344 return -EINVAL;
3345
3346 while (size) {
3347 uint32_t value;
3348
3349 value = RREG32_DIDT(*pos >> 2);
3350 r = put_user(value, (uint32_t *)buf);
3351 if (r)
3352 return r;
3353
3354 result += 4;
3355 buf += 4;
3356 *pos += 4;
3357 size -= 4;
3358 }
3359
3360 return result;
3361}
3362
3363static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3364 size_t size, loff_t *pos)
3365{
Al Viro45063092016-12-04 18:24:56 -05003366 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003367 ssize_t result = 0;
3368 int r;
3369
3370 if (size & 0x3 || *pos & 0x3)
3371 return -EINVAL;
3372
3373 while (size) {
3374 uint32_t value;
3375
3376 r = get_user(value, (uint32_t *)buf);
3377 if (r)
3378 return r;
3379
3380 WREG32_DIDT(*pos >> 2, value);
3381
3382 result += 4;
3383 buf += 4;
3384 *pos += 4;
3385 size -= 4;
3386 }
3387
3388 return result;
3389}
3390
3391static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3392 size_t size, loff_t *pos)
3393{
Al Viro45063092016-12-04 18:24:56 -05003394 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003395 ssize_t result = 0;
3396 int r;
3397
3398 if (size & 0x3 || *pos & 0x3)
3399 return -EINVAL;
3400
3401 while (size) {
3402 uint32_t value;
3403
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003404 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04003405 r = put_user(value, (uint32_t *)buf);
3406 if (r)
3407 return r;
3408
3409 result += 4;
3410 buf += 4;
3411 *pos += 4;
3412 size -= 4;
3413 }
3414
3415 return result;
3416}
3417
3418static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3419 size_t size, loff_t *pos)
3420{
Al Viro45063092016-12-04 18:24:56 -05003421 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003422 ssize_t result = 0;
3423 int r;
3424
3425 if (size & 0x3 || *pos & 0x3)
3426 return -EINVAL;
3427
3428 while (size) {
3429 uint32_t value;
3430
3431 r = get_user(value, (uint32_t *)buf);
3432 if (r)
3433 return r;
3434
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003435 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04003436
3437 result += 4;
3438 buf += 4;
3439 *pos += 4;
3440 size -= 4;
3441 }
3442
3443 return result;
3444}
3445
Tom St Denis1e051412016-06-27 09:57:18 -04003446static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3447 size_t size, loff_t *pos)
3448{
Al Viro45063092016-12-04 18:24:56 -05003449 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04003450 ssize_t result = 0;
3451 int r;
3452 uint32_t *config, no_regs = 0;
3453
3454 if (size & 0x3 || *pos & 0x3)
3455 return -EINVAL;
3456
Markus Elfringecab7662016-09-18 17:00:52 +02003457 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04003458 if (!config)
3459 return -ENOMEM;
3460
3461 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05003462 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04003463 config[no_regs++] = adev->gfx.config.max_shader_engines;
3464 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3465 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3466 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3467 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3468 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3469 config[no_regs++] = adev->gfx.config.max_gprs;
3470 config[no_regs++] = adev->gfx.config.max_gs_threads;
3471 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3472 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3473 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3474 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3475 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3476 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3477 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3478 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3479 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3480 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3481 config[no_regs++] = adev->gfx.config.num_gpus;
3482 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3483 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3484 config[no_regs++] = adev->gfx.config.gb_addr_config;
3485 config[no_regs++] = adev->gfx.config.num_rbs;
3486
Tom St Denis89a8f302016-08-12 15:14:31 -04003487 /* rev==1 */
3488 config[no_regs++] = adev->rev_id;
3489 config[no_regs++] = adev->pg_flags;
3490 config[no_regs++] = adev->cg_flags;
3491
Tom St Denise9f11dc2016-08-17 12:00:51 -04003492 /* rev==2 */
3493 config[no_regs++] = adev->family;
3494 config[no_regs++] = adev->external_rev_id;
3495
Tom St Denis9a999352017-01-18 13:01:25 -05003496 /* rev==3 */
3497 config[no_regs++] = adev->pdev->device;
3498 config[no_regs++] = adev->pdev->revision;
3499 config[no_regs++] = adev->pdev->subsystem_device;
3500 config[no_regs++] = adev->pdev->subsystem_vendor;
3501
Tom St Denis1e051412016-06-27 09:57:18 -04003502 while (size && (*pos < no_regs * 4)) {
3503 uint32_t value;
3504
3505 value = config[*pos >> 2];
3506 r = put_user(value, (uint32_t *)buf);
3507 if (r) {
3508 kfree(config);
3509 return r;
3510 }
3511
3512 result += 4;
3513 buf += 4;
3514 *pos += 4;
3515 size -= 4;
3516 }
3517
3518 kfree(config);
3519 return result;
3520}
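
/*
 * The dword stream produced above is versioned and append-only: config[0]
 * holds the layout revision and new fields are only ever added at the end.
 * A hedged userspace sketch (path and index assumed from the layout above,
 * not taken from any UAPI header):
 *
 *   uint32_t cfg[256];
 *   int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
 *   ssize_t n = read(fd, cfg, sizeof(cfg));
 *   if (n >= 4 && cfg[0] >= 1)
 *       printf("rev_id: %u\n", cfg[24]);   // first rev==1 field
 *   close(fd);
 */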
3521
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003522static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3523 size_t size, loff_t *pos)
3524{
Al Viro45063092016-12-04 18:24:56 -05003525 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003526 int idx, x, outsize, r, valuesize;
3527 uint32_t values[16];
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003528
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003529 if (size & 3 || *pos & 0x3)
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003530 return -EINVAL;
3531
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003532 if (amdgpu_dpm == 0)
3533 return -EINVAL;
3534
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003535 /* convert offset to sensor number */
3536 idx = *pos >> 2;
3537
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003538 valuesize = sizeof(values);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003539 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003540 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003541 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3542 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3543 &valuesize);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003544 else
3545 return -EINVAL;
3546
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003547 if (size > valuesize)
3548 return -EINVAL;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003549
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003550 outsize = 0;
3551 x = 0;
3552 if (!r) {
3553 while (size) {
3554 r = put_user(values[x++], (int32_t *)buf);
3555 buf += 4;
3556 size -= 4;
3557 outsize += 4;
3558 }
3559 }
3560
3561 return !r ? outsize : r;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003562}
Tom St Denis1e051412016-06-27 09:57:18 -04003563
Tom St Denis273d7aa2016-10-11 14:48:55 -04003564static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3565 size_t size, loff_t *pos)
3566{
3567 struct amdgpu_device *adev = f->f_inode->i_private;
3568 int r, x;
3569 ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003570 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003571
3572 if (size & 3 || *pos & 3)
3573 return -EINVAL;
3574
3575 /* decode offset */
3576 offset = (*pos & 0x7F);
3577 se = ((*pos >> 7) & 0xFF);
3578 sh = ((*pos >> 15) & 0xFF);
3579 cu = ((*pos >> 23) & 0xFF);
3580 wave = ((*pos >> 31) & 0xFF);
3581 simd = ((*pos >> 37) & 0xFF);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003582
3583 /* switch to the specific se/sh/cu */
3584 mutex_lock(&adev->grbm_idx_mutex);
3585 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3586
3587 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003588 if (adev->gfx.funcs->read_wave_data)
3589 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003590
3591 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3592 mutex_unlock(&adev->grbm_idx_mutex);
3593
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003594 if (!x)
3595 return -EINVAL;
3596
Tom St Denis472259f2016-10-14 09:49:09 -04003597 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003598 uint32_t value;
3599
Tom St Denis472259f2016-10-14 09:49:09 -04003600 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003601 r = put_user(value, (uint32_t *)buf);
3602 if (r)
3603 return r;
3604
3605 result += 4;
3606 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003607 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003608 size -= 4;
3609 }
3610
3611 return result;
3612}
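
/*
 * A hedged sketch of composing an offset for the amdgpu_wave file,
 * mirroring the decode at the top of amdgpu_debugfs_wave_read(); purely
 * illustrative, not a stable UAPI:
 *
 *   off_t pos = (off_t)(offset & 0x7F) | ((off_t)se << 7) |
 *               ((off_t)sh << 15) | ((off_t)cu << 23) |
 *               ((off_t)wave << 31) | ((off_t)simd << 37);
 *
 * The GPR reader below uses a similar but wider packing of its own.
 */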
3613
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003614static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3615 size_t size, loff_t *pos)
3616{
3617 struct amdgpu_device *adev = f->f_inode->i_private;
3618 int r;
3619 ssize_t result = 0;
3620 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3621
3622 if (size & 3 || *pos & 3)
3623 return -EINVAL;
3624
3625 /* decode offset */
3626 offset = (*pos & 0xFFF); /* in dwords */
3627 se = ((*pos >> 12) & 0xFF);
3628 sh = ((*pos >> 20) & 0xFF);
3629 cu = ((*pos >> 28) & 0xFF);
3630 wave = ((*pos >> 36) & 0xFF);
3631 simd = ((*pos >> 44) & 0xFF);
3632 thread = ((*pos >> 52) & 0xFF);
3633 bank = ((*pos >> 60) & 1);
3634
3635 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3636 if (!data)
3637 return -ENOMEM;
3638
3639 /* switch to the specific se/sh/cu */
3640 mutex_lock(&adev->grbm_idx_mutex);
3641 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3642
3643 if (bank == 0) {
3644 if (adev->gfx.funcs->read_wave_vgprs)
3645 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3646 } else {
3647 if (adev->gfx.funcs->read_wave_sgprs)
3648 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3649 }
3650
3651 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3652 mutex_unlock(&adev->grbm_idx_mutex);
3653
3654 while (size) {
3655 uint32_t value;
3656
3657 value = data[offset++];
3658 r = put_user(value, (uint32_t *)buf);
3659 if (r) {
3660 result = r;
3661 goto err;
3662 }
3663
3664 result += 4;
3665 buf += 4;
3666 size -= 4;
3667 }
3668
3669err:
3670 kfree(data);
3671 return result;
3672}
3673
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003674static const struct file_operations amdgpu_debugfs_regs_fops = {
3675 .owner = THIS_MODULE,
3676 .read = amdgpu_debugfs_regs_read,
3677 .write = amdgpu_debugfs_regs_write,
3678 .llseek = default_llseek
3679};
Tom St Denisadcec282016-04-15 13:08:44 -04003680static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3681 .owner = THIS_MODULE,
3682 .read = amdgpu_debugfs_regs_didt_read,
3683 .write = amdgpu_debugfs_regs_didt_write,
3684 .llseek = default_llseek
3685};
3686static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3687 .owner = THIS_MODULE,
3688 .read = amdgpu_debugfs_regs_pcie_read,
3689 .write = amdgpu_debugfs_regs_pcie_write,
3690 .llseek = default_llseek
3691};
3692static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3693 .owner = THIS_MODULE,
3694 .read = amdgpu_debugfs_regs_smc_read,
3695 .write = amdgpu_debugfs_regs_smc_write,
3696 .llseek = default_llseek
3697};
3698
Tom St Denis1e051412016-06-27 09:57:18 -04003699static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3700 .owner = THIS_MODULE,
3701 .read = amdgpu_debugfs_gca_config_read,
3702 .llseek = default_llseek
3703};
3704
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003705static const struct file_operations amdgpu_debugfs_sensors_fops = {
3706 .owner = THIS_MODULE,
3707 .read = amdgpu_debugfs_sensor_read,
3708 .llseek = default_llseek
3709};
3710
Tom St Denis273d7aa2016-10-11 14:48:55 -04003711static const struct file_operations amdgpu_debugfs_wave_fops = {
3712 .owner = THIS_MODULE,
3713 .read = amdgpu_debugfs_wave_read,
3714 .llseek = default_llseek
3715};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003716static const struct file_operations amdgpu_debugfs_gpr_fops = {
3717 .owner = THIS_MODULE,
3718 .read = amdgpu_debugfs_gpr_read,
3719 .llseek = default_llseek
3720};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003721
Tom St Denisadcec282016-04-15 13:08:44 -04003722static const struct file_operations *debugfs_regs[] = {
3723 &amdgpu_debugfs_regs_fops,
3724 &amdgpu_debugfs_regs_didt_fops,
3725 &amdgpu_debugfs_regs_pcie_fops,
3726 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003727 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003728 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003729 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003730 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003731};
3732
3733static const char *debugfs_regs_names[] = {
3734 "amdgpu_regs",
3735 "amdgpu_regs_didt",
3736 "amdgpu_regs_pcie",
3737 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003738 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003739 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003740 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003741 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003742};
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003743
3744static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3745{
3746 struct drm_minor *minor = adev->ddev->primary;
3747 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003748 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003749
Tom St Denisadcec282016-04-15 13:08:44 -04003750 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3751 ent = debugfs_create_file(debugfs_regs_names[i],
3752 S_IFREG | S_IRUGO, root,
3753 adev, debugfs_regs[i]);
3754 if (IS_ERR(ent)) {
3755 for (j = 0; j < i; j++) {
3756 debugfs_remove(adev->debugfs_regs[j]);
3757 adev->debugfs_regs[j] = NULL;
3758 }
3759 return PTR_ERR(ent);
3760 }
3761
3762 if (!i)
3763 i_size_write(ent->d_inode, adev->rmmio_size);
3764 adev->debugfs_regs[i] = ent;
3765 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003766
3767 return 0;
3768}
3769
3770static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3771{
Tom St Denisadcec282016-04-15 13:08:44 -04003772 unsigned i;
3773
3774 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3775 if (adev->debugfs_regs[i]) {
3776 debugfs_remove(adev->debugfs_regs[i]);
3777 adev->debugfs_regs[i] = NULL;
3778 }
3779 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003780}
3781
Huang Rui4f0955f2017-05-10 23:04:06 +08003782static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3783{
3784 struct drm_info_node *node = (struct drm_info_node *) m->private;
3785 struct drm_device *dev = node->minor->dev;
3786 struct amdgpu_device *adev = dev->dev_private;
3787 int r = 0, i;
3788
3789 /* hold on the scheduler */
3790 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3791 struct amdgpu_ring *ring = adev->rings[i];
3792
3793 if (!ring || !ring->sched.thread)
3794 continue;
3795 kthread_park(ring->sched.thread);
3796 }
3797
3798 seq_printf(m, "run ib test:\n");
3799 r = amdgpu_ib_ring_tests(adev);
3800 if (r)
3801 seq_printf(m, "ib ring tests failed (%d).\n", r);
3802 else
3803 seq_printf(m, "ib ring tests passed.\n");
3804
3805 /* go on the scheduler */
3806 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3807 struct amdgpu_ring *ring = adev->rings[i];
3808
3809 if (!ring || !ring->sched.thread)
3810 continue;
3811 kthread_unpark(ring->sched.thread);
3812 }
3813
3814 return 0;
3815}
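
/* reading the amdgpu_test_ib file (e.g. via
 * "cat /sys/kernel/debug/dri/<minor>/amdgpu_test_ib") parks every
 * scheduler thread, runs the IB smoke tests on all rings, reports the
 * result and then unparks the schedulers */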
3816
3817static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3818 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3819};
3820
3821static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3822{
3823 return amdgpu_debugfs_add_files(adev,
3824 amdgpu_debugfs_test_ib_ring_list, 1);
3825}
3826
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003827int amdgpu_debugfs_init(struct drm_minor *minor)
3828{
3829 return 0;
3830}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003831#else
Arnd Bergmann27bad5b2017-06-21 23:51:02 +02003832static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
Huang Rui4f0955f2017-05-10 23:04:06 +08003833{
3834 return 0;
3835}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003836static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3837{
3838 return 0;
3839}
3840static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003841#endif