/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

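/**
 * amdgpu_device_is_px - check whether the device uses PX/HG power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device has the AMD_IS_PX flag set (hybrid
 * graphics laptop), false otherwise.
 */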
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
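/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Reads the register either directly, through the MM_INDEX/MM_DATA
 * pair when the offset lies beyond the mapped MMIO range, or through
 * the KIQ when running as an SR-IOV guest.
 * Returns the 32 bit value from the register.
 */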
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

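/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value to the register either directly, through the
 * MM_INDEX/MM_DATA pair, or through the KIQ when running as an
 * SR-IOV guest.
 */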
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

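/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the register, going through
 * MM_INDEX/MM_DATA when the offset lies beyond the IO BAR.
 */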
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

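/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value to the register, going through MM_INDEX/MM_DATA
 * when the offset lies beyond the IO BAR.
 */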
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

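/**
 * amdgpu_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates, pins and maps a single page buffer object in VRAM for
 * use as a scratch area by the driver.
 * Returns 0 on success, negative error code on failure.
 */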
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r)
			return r;
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

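/**
 * amdgpu_vram_scratch_fini - free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Unmaps, unpins and frees the VRAM scratch page allocated by
 * amdgpu_vram_scratch_init().
 */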
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL)
		return;

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

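/**
 * amdgpu_pci_config_reset - reset the asic via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Writes the reset magic value to PCI config register 0x7c to
 * trigger an asic reset.
 */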
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a 64 bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a pair of adjacent wb slots for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);

	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a 64 bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a pair of wb slots allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or
 * for IGP the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32 bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882 along with lots of ubuntu ones)
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (ie. we are not affected by the bogus hw of Novell bug 204882 along with
 * lots of ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

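/**
 * amdgpu_vpost_needed - check whether the driver should post the asic
 *
 * @adev: amdgpu_device pointer
 *
 * SR-IOV VFs are posted by the host. Passthrough setups on Fiji with
 * old SMC firmware always require a vPost (see the comment below);
 * everything else falls back to amdgpu_need_post().
 */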
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after
		 * the VM reboots some old smc fw still needs the driver to do a
		 * vPost, otherwise the gpu hangs. smc fw versions above 22.15
		 * don't have this flaw, so we force vPost for smc versions
		 * below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

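/**
 * amdgpu_check_block_size - validate the vm_block_size module parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Resets amdgpu_vm_block_size to the default (-1) if the value given
 * on the command line is out of range.
 */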
static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

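/**
 * amdgpu_check_vm_size - validate the vm_size module parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Resets amdgpu_vm_size to the default (-1) if the value given on
 * the command line is not a power of two or is out of range.
 */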
static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater than or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

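/**
 * amdgpu_set_clockgating_state - set clockgating for one IP block type
 *
 * @adev: amdgpu_device pointer
 * @block_type: the IP block type to operate on
 * @state: clockgating state to set
 *
 * Calls the set_clockgating_state callback of every valid IP block of
 * the given type. Returns the last error returned by a callback, or 0.
 */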
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

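/**
 * amdgpu_set_powergating_state - set powergating for one IP block type
 *
 * @adev: amdgpu_device pointer
 * @block_type: the IP block type to operate on
 * @state: powergating state to set
 *
 * Calls the set_powergating_state callback of every valid IP block of
 * the given type. Returns the last error returned by a callback, or 0.
 */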
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

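/**
 * amdgpu_get_clockgating_state - collect clockgating flags of all IP blocks
 *
 * @adev: amdgpu_device pointer
 * @flags: bitmask into which each IP block ORs its active clockgating
 * features
 */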
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

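/**
 * amdgpu_wait_for_idle - wait for an IP block to become idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: the IP block type to wait on
 *
 * Calls the wait_for_idle callback of the first valid IP block of the
 * given type. Returns 0 on success or the callback's error code.
 */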
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

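/**
 * amdgpu_is_idle - check whether an IP block is idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: the IP block type to check
 *
 * Returns the result of the is_idle callback of the first valid IP
 * block of the given type, or true if no such block exists.
 */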
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

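/**
 * amdgpu_get_ip_block - find an IP block of a given type
 *
 * @adev: amdgpu_device pointer
 * @type: the IP block type to look up
 *
 * Returns a pointer to the matching IP block, or NULL if the asic
 * does not have an IP block of that type.
 */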
struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

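/**
 * amdgpu_device_enable_virtual_display - enable the virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the amdgpu_virtual_display module parameter (a semicolon
 * separated list of PCI addresses, each optionally followed by a comma
 * and a crtc count) and enables virtual display on this device if its
 * PCI address is listed or "all" is given.
 */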
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

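/**
 * amdgpu_device_parse_gpu_info_fw - load and parse the gpu_info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the <chip>_gpu_info.bin firmware for asics that provide one
 * (currently VEGA10 and RAVEN) and uses it to fill in the gfx
 * configuration and CU info.
 * Returns 0 on success, negative error code on failure.
 */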
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

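/**
 * amdgpu_early_init - set up the IP blocks and run their early_init hooks
 *
 * @adev: amdgpu_device pointer
 *
 * Determines the asic family, registers the IP blocks for it, applies
 * the ip_block_mask module parameter and calls the early_init callback
 * of every remaining valid IP block.
 * Returns 0 on success, negative error code on failure.
 */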
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

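/**
 * amdgpu_init - run sw/hw init for all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls sw_init for every valid IP block, bringing up the GMC block's
 * hw early (plus VRAM scratch, writeback and, for SR-IOV, the static
 * CSA) so that GPU memory can be allocated, then calls hw_init for the
 * remaining blocks.
 * Returns 0 on success, negative error code on failure.
 */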
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

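/*
 * amdgpu_fill_reset_magic() snapshots the first AMDGPU_RESET_MAGIC_NUM
 * bytes of the GART table; amdgpu_check_vram_lost() compares that
 * snapshot after a reset to detect whether VRAM contents were lost.
 */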
static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

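/**
 * amdgpu_late_set_cg_state - enable clockgating on all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Enables clockgating for every valid IP block except UVD and VCE,
 * whose gating is handled specially. Run from the delayed
 * late_init_work (see amdgpu_late_init()).
 * Returns 0 on success, negative error code on failure.
 */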
static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

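/**
 * amdgpu_late_init - run late init for all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls late_init for every valid IP block, schedules the delayed
 * clockgating work and records the reset magic for later VRAM-lost
 * detection.
 * Returns 0 on success, negative error code on failure.
 */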
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_fill_reset_magic(adev);

	return 0;
}

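/**
 * amdgpu_fini - tear down all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the IP blocks in reverse init order: the SMC block is
 * shut down first, then hw_fini, sw_fini and late_fini are run for the
 * remaining blocks. For SR-IOV the static CSA is freed and full GPU
 * access is released at the end.
 * Returns 0 on success (hw/sw fini errors are only logged).
 */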
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}

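/* worker for late_init_work; applies the deferred clockgating state */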
static void amdgpu_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_late_set_cg_state(adev);
}

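/**
 * amdgpu_suspend - suspend all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Ungates all blocks (SMC first) so they can shut down cleanly, then
 * calls each valid block's suspend callback in reverse init order.
 * Returns 0 (individual suspend errors are only logged).
 */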
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

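/**
 * amdgpu_sriov_reinit_early - re-init the first IP blocks after a VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs hw_init for the GMC, COMMON and IH blocks, in that fixed
 * order, before SMC/CP/SDMA are brought back up.
 */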
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

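/**
 * amdgpu_sriov_reinit_late - re-init the remaining IP blocks after a VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs hw_init for the SMC, DCE, GFX, SDMA and VCE blocks, in that
 * fixed order, once the early blocks and the GART have been recovered.
 */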
static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_VCE,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

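/**
 * amdgpu_resume_phase1 - resume the COMMON, GMC and IH blocks
 *
 * @adev: amdgpu_device pointer
 *
 * First of the two resume phases: brings back only the blocks needed
 * to access VRAM (COMMON, GMC, IH) so that, after a reset, VRAM loss
 * can be detected and the GART recovered before everything else.
 * Returns 0 on success, negative error code on failure.
 */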
static int amdgpu_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

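/**
 * amdgpu_resume_phase2 - resume all remaining IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume phase: resumes every valid block that phase 1 did not
 * handle (everything except COMMON, GMC and IH).
 * Returns 0 on success, negative error code on failure.
 */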
static int amdgpu_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

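/**
 * amdgpu_resume - resume all valid IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Runs both resume phases back to back.
 * Returns 0 on success, negative error code on failure.
 */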
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_resume_phase2(adev);

	return r;
}

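/**
 * amdgpu_device_detect_sriov_bios - detect an SR-IOV capable vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Queries the vbios (atomfirmware or legacy atombios, as appropriate)
 * for virtualization support and sets AMDGPU_SRIOV_CAPS_SRIOV_VBIOS
 * in the virt caps if found.
 */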
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gtt_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can call these functions again later without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have more than one VGA card, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	if (!adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			goto failed;
		}
		/* init i2c buses */
		amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_test_ib_ring_init(adev);
	if (r)
		DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		goto failed;
	}

	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the hw into a full (D3hot) suspend state
 * @fbcon: suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	if (adev->is_atom_fw)
		amdgpu_atomfirmware_scratch_regs_save(adev);
	else
		amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true if the PCI device needs to be re-enabled first
 * @fbcon: resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}
	if (adev->is_atom_fw)
		amdgpu_atomfirmware_scratch_regs_restore(adev);
	else
		amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

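/**
 * amdgpu_check_soft_reset - check whether any IP block is hung
 *
 * @adev: amdgpu_device pointer
 *
 * Asks every valid IP block that implements check_soft_reset whether
 * it is hung, latching the per-block hang status for the soft-reset
 * helpers below.
 * Returns true if at least one block reports a hang.
 */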
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

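/**
 * amdgpu_pre_soft_reset - prepare hung IP blocks for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Calls pre_soft_reset on every hung block that implements it.
 * Returns 0 on success, negative error code on failure.
 */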
static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

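/**
 * amdgpu_need_full_reset - check whether a full asic reset is required
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if one of the core blocks (GMC, SMC, ACP or DCE) is
 * hung; those are not recoverable by a per-block soft reset.
 */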
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

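/**
 * amdgpu_soft_reset - soft-reset all hung IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls soft_reset on every hung block that implements it.
 * Returns 0 on success, negative error code on failure.
 */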
static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

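/**
 * amdgpu_post_soft_reset - clean up after a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Calls post_soft_reset on every hung block that implements it.
 * Returns 0 on success, negative error code on failure.
 */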
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

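/*
 * Shadow-buffer backup of VRAM is pointless on APUs (no dedicated
 * VRAM) and is only needed when GPU recovery (lockup_timeout > 0)
 * is enabled.
 */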
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0;
}

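/**
 * amdgpu_recover_vram_from_shadow - restore a VRAM BO from its shadow
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the restore copy on
 * @bo: buffer object to restore
 * @fence: fence of the restore copy, returned to the caller
 *
 * If @bo has a shadow copy and still lives in VRAM, validates and
 * binds the shadow and schedules a copy back into VRAM. Used after a
 * full reset, when VRAM contents cannot be trusted.
 * Returns 0 on success, negative error code on failure.
 */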
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
		if (r) {
			DRM_ERROR("%p bind failed\n", bo->shadow);
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @job: the job that triggered the hang, or NULL
 *
 * Attempt to reset the GPU if it has hung (all asics),
 * for the SR-IOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
{
	int i, j, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* we start from the ring that triggered the GPU hang */
	j = job ? job->ring->idx : 0;

	/* block scheduler */
	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);

		if (job && j != i)
			continue;

		/* last chance to check if the job was removed from the mirror
		 * list, since we already paid the cost of kthread_park */
		if (job && list_empty(&job->base.node)) {
			kthread_unpark(ring->sched.thread);
			goto give_up_reset;
		}

		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
			amd_sched_job_kickout(&job->base);

		/* only do job_reset on the hang ring if @job not NULL */
		amd_sched_hw_job_reset(&ring->sched);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion_ring(ring);
	}

	/* request to take full control of GPU before re-initialization */
	if (job)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover the gart before resuming SMC/CP/SDMA */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	if (amdgpu_ib_ring_tests(adev))
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		if (job && j != i) {
			kthread_unpark(ring->sched.thread);
			continue;
		}

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
give_up_reset:
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace? */
		dev_info(adev->dev, "GPU reset failed\n");
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset, vram_lost = false;

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

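	/*
	 * Prefer the lighter per-IP soft reset first and only fall back to
	 * a full ASIC reset (suspend/reset/resume of every IP block) when
	 * the soft reset fails or still leaves blocks hung.
	 */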
	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fall back to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		/* Disable fb access */
		if (adev->mode_info.num_crtc) {
			struct amdgpu_mode_mc_save save;
			amdgpu_display_stop_mc_access(adev, &save);
			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
		}
		if (adev->is_atom_fw)
			amdgpu_atomfirmware_scratch_regs_save(adev);
		else
			amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		if (adev->is_atom_fw)
			amdgpu_atomfirmware_scratch_regs_restore(adev);
		else
			amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume_phase1(adev);
			if (r)
				goto out;
			vram_lost = amdgpu_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				goto out;
			r = amdgpu_resume_phase2(adev);
			if (r)
				goto out;
			if (vram_lost)
				amdgpu_fill_reset_magic(adev);
		}
	}
out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/*
		 * Recover VM page tables, since we cannot rely on VRAM being
		 * consistent after a GPU full reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow didn't complete\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow didn't complete\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i] && adev->rings[i]->sched.thread) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r)
		/* bad news, how to tell it to userspace? */
		dev_info(adev->dev, "GPU reset failed\n");
	else
		dev_info(adev->dev, "GPU reset succeeded!\n");

	return r;
}

void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}
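
/*
 * Note: both masks can also be forced from userspace through the module
 * options checked at the top of this function, e.g. (the values below are
 * purely illustrative CAIL_* bitmasks, not recommendations):
 *
 *   modprobe amdgpu pcie_gen_cap=0x30007 pcie_lane_cap=0x10106
 *
 * which bypasses the PCIe probing above entirely.
 */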

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

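/*
 * Layout of the 64-bit file offset consumed by the amdgpu_regs
 * read/write handlers below (informal summary of the decode code):
 *
 *   bits  0..21  register byte offset (dword aligned)
 *   bit      23  take the PM mutex ("PG lock") around the access
 *   bits 24..33  SE bank        (0x3FF means "all")
 *   bits 34..43  SH bank        (0x3FF means "all")
 *   bits 44..53  instance bank  (0x3FF means "all")
 *   bit      62  enable SE/SH/instance bank selection
 *
 * A minimal userspace sketch (hypothetical, not part of the driver)
 * reading one banked register:
 *
 *   uint64_t pos = (reg_byte_offset & ((1ULL << 22) - 1)) |
 *                  (1ULL << 62) |               // use banking
 *                  ((uint64_t)se << 24) |
 *                  ((uint64_t)sh << 34) |
 *                  ((uint64_t)instance << 44);
 *   uint32_t value;
 *   pread(fd, &value, 4, pos);                  // fd: amdgpu_regs
 */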
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

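/*
 * amdgpu_gca_config exposes a versioned array of dwords: config[0] is a
 * layout version that is bumped whenever fields are appended, so a
 * userspace consumer should check it before interpreting later entries.
 */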
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
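
/*
 * The sensors file maps file offset sensor_index * 4 to the sensor
 * enumeration understood by the read_sensor() hooks above; a hypothetical
 * userspace reader would do, e.g.:
 *
 *   uint32_t v;
 *   pread(fd, &v, 4, idx * 4);   // fd: amdgpu_sensors
 */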
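/*
 * amdgpu_wave packs the wave selector into the file offset, matching the
 * decode below: dword offset in bits 0..6, then SE (<<7), SH (<<15),
 * CU (<<23), wave (<<31) and SIMD (<<37), each masked with 0xFF.
 */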
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
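/*
 * amdgpu_gpr uses a wider selector, again mirroring the decode below:
 * dword offset in bits 0..11, then SE (<<12), SH (<<20), CU (<<28),
 * wave (<<36), SIMD (<<44), thread (<<52) and the bank bit (<<60);
 * bank 0 reads VGPRs, bank 1 reads SGPRs.
 */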
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
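
/*
 * With CONFIG_DEBUG_FS set, amdgpu_debugfs_regs_init() below creates one
 * file per entry here under the DRM debugfs root of the primary minor,
 * typically /sys/kernel/debug/dri/<minor>/amdgpu_regs and friends.
 */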

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the files created so far (note: index j,
			 * not i, so the earlier dentries are the ones freed)
			 */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* park the scheduler threads */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* restart the scheduler threads */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
};

static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_debugfs_test_ib_ring_list, 1);
}
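
/*
 * Reading the resulting file runs the IB tests with the schedulers
 * parked, e.g. (assuming DRM minor 0):
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_test_ib
 */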

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif