/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_pm.h"

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

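/*
 * Illustrative usage sketch (not driver code): registers that fall inside
 * the MMIO BAR are accessed directly, while anything beyond it goes
 * through the mmMM_INDEX/mmMM_DATA indirect pair under mmio_idx_lock:
 *
 *	u32 val = amdgpu_mm_rreg(adev, reg, 0);
 *	amdgpu_mm_wreg(adev, reg, val | some_mask, 0);
 *
 * Most callers use RREG32()/WREG32() style wrappers rather than calling
 * these helpers directly.
 */
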
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

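/*
 * Illustrative usage sketch (names here are hypothetical, not driver API):
 * a ring that owns doorbell slot "index" publishes its write pointer to
 * the hardware with something like
 *
 *	amdgpu_mm_wdoorbell(adev, index, lower_32_bits(wptr));
 *
 * while VEGA10+ rings use the 64-bit variants above for 64-bit pointers.
 */
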
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r)
			return r;
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL)
		return;

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

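/*
 * Illustrative golden-register table (register names and values below are
 * hypothetical): each {reg, and_mask, or_mask} triplet either
 * read-modify-writes the register, or overwrites it outright when and_mask
 * is 0xffffffff:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG_A, 0x0000ffff, 0x00001234,  (replace low 16 bits)
 *		mmEXAMPLE_REG_B, 0xffffffff, 0xdeadbeef,  (plain write)
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */
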
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK).
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

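/*
 * Illustrative lifecycle of a writeback slot (sketch, not driver code):
 * slot "wb" is the same dword seen from the CPU via adev->wb.wb[wb] and
 * from the GPU via adev->wb.gpu_addr + wb * 4:
 *
 *	u32 wb;
 *
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;  (the GPU writes here)
 *		u32 val = adev->wb.wb[wb];                  (the CPU reads it back)
 *		amdgpu_wb_free(adev, wb);
 *	}
 */
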
/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a 64 bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64 bit wb slot (two adjacent dwords) for use by the driver
 * (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);

	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics).
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a 64 bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a 64 bit wb slot allocated for use by the driver (all asics).
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as a
 * parameter (which is so far either the PCI aperture address or, for IGP,
 * the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT
 * size; thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

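/*
 * Worked example with illustrative numbers: with VRAM placed at base 0,
 * 8 GB in size, and a 40-bit mc_mask, vram_end is 0x1_ffff_ffff, so the
 * space before VRAM (size_bf) is 0 and GTT lands right after VRAM at
 * 0x2_0000_0000 (subject to gtt_base_align).  Placing GTT before VRAM
 * only wins when VRAM is programmed high enough to leave a bigger hole
 * below it.
 */
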
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check whether the hw needs to be posted
 *
 * @adev: amdgpu_device pointer
 *
 * Checks whether the asic has been initialized (all asics) at driver
 * startup, or whether a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if (reg)
		return false;

	return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * some old SMC firmware still needs the driver to do a vPost
		 * after a VM reboot, otherwise the GPU hangs.  SMC firmware
		 * versions above 22.15 don't have this flaw, so we force a
		 * vPost only for SMC versions below that.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
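	/* arg & (arg - 1) clears the lowest set bit, so the result is zero
	 * only for powers of two (note that 0 passes this check as well).
	 */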
	return (arg & (arg - 1)) == 0;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
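	/* Worked example for the default path below (illustrative): an
	 * amdgpu_vm_size of 8 (GB) gives bits = ilog2(8) + 18 = 21, so
	 * vm_block_size becomes 21 - 9 = 12 and the page directory stays
	 * 4K in size (2^(21-12) = 512 eight-byte entries).
	 */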
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equally between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

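/*
 * Illustrative use (sketch): gate a code path on a minimum IP version,
 * e.g. run something only when the GFX IP is at least v8.0:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) == 0) {
 *		(GFX 8.0+ specific path)
 *	}
 */
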
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

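/*
 * The amdgpu_virtual_display module parameter parsed below is a
 * semicolon-separated list of "<pci address>[,<number of crtcs>]" entries,
 * or the keyword "all"; for example (illustrative value):
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 */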
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	amdgpu_dpm_enable_uvd(adev, false);
	amdgpu_dpm_enable_vce(adev, false);

	return 0;
}

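/*
 * amdgpu_fini - tear down all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Tear-down runs in reverse init order: the SMC block is ungated and shut
 * down first, then hw_fini, sw_fini and late_fini are called on the
 * remaining blocks.  For SR-IOV the static CSA is freed and full GPU
 * access is released at the end.
 */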
1574static int amdgpu_fini(struct amdgpu_device *adev)
1575{
1576 int i, r;
1577
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001578 /* need to disable SMC first */
1579 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001580 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001581 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001582 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001583 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001584 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1585 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001586 if (r) {
1587 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001588 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001589 return r;
1590 }
Alex Deuchera1255102016-10-13 17:41:13 -04001591 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001592 /* XXX handle errors */
1593 if (r) {
1594 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001595 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001596 }
Alex Deuchera1255102016-10-13 17:41:13 -04001597 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001598 break;
1599 }
1600 }
1601
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001602 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001603 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001604 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001605 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001606 amdgpu_wb_fini(adev);
1607 amdgpu_vram_scratch_fini(adev);
1608 }
Rex Zhu8201a672016-11-24 21:44:44 +08001609
1610 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1611 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1612 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1613 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1614 AMD_CG_STATE_UNGATE);
1615 if (r) {
1616 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1617 adev->ip_blocks[i].version->funcs->name, r);
1618 return r;
1619 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001620 }
Rex Zhu8201a672016-11-24 21:44:44 +08001621
Alex Deuchera1255102016-10-13 17:41:13 -04001622 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001623 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001624 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001625 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1626 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001627 }
Rex Zhu8201a672016-11-24 21:44:44 +08001628
Alex Deuchera1255102016-10-13 17:41:13 -04001629 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001630 }
1631
1632 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001633 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001634 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001635 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001636 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001637 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001638 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1639 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001640 }
Alex Deuchera1255102016-10-13 17:41:13 -04001641 adev->ip_blocks[i].status.sw = false;
1642 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001643 }
1644
Monk Liua6dcfd92016-05-19 14:36:34 +08001645 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001646 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001647 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001648 if (adev->ip_blocks[i].version->funcs->late_fini)
1649 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1650 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001651 }
1652
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001653 if (amdgpu_sriov_vf(adev)) {
Monk Liu24936642017-01-09 15:54:32 +08001654 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001655 amdgpu_virt_release_full_gpu(adev, false);
1656 }
Monk Liu24936642017-01-09 15:54:32 +08001657
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001658 return 0;
1659}
1660
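/*
 * amdgpu_suspend - suspend all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Ungates the SMC first, then walks the remaining IP blocks in reverse
 * order, ungating each one so that its ->suspend() hook can shut it down
 * cleanly.  Under SR-IOV, full GPU access is requested for the duration
 * of the sequence and released afterwards.
 */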
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

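/*
 * amdgpu_sriov_reinit_early - re-init the blocks needed before GART recovery
 *
 * @adev: amdgpu_device pointer
 *
 * During an SR-IOV reset only COMMON, GMC and IH are brought back up here;
 * everything else (SMC/CP/SDMA and friends) must wait until the GART has
 * been recovered and is handled by amdgpu_sriov_reinit_late().
 */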
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);

		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

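/*
 * amdgpu_sriov_reinit_late - re-init the remaining blocks after GART recovery
 *
 * @adev: amdgpu_device pointer
 *
 * Counterpart of amdgpu_sriov_reinit_early(): skips COMMON, GMC and IH
 * (already up) and runs hw_init on every other valid IP block.
 */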
static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

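/*
 * amdgpu_resume - resume all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls ->resume() on every valid IP block in init order and bails out on
 * the first failure.
 */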
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

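/*
 * amdgpu_device_detect_sriov_bios - check whether the vbios supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Sets AMDGPU_SRIOV_CAPS_SRIOV_VBIOS in adev->virt.caps when the vbios
 * (atomfirmware or legacy atombios, depending on adev->is_atom_fw)
 * advertises GPU virtualization support.
 */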
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gtt_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&adev->vm_manager.lock);
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	if (!adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			goto failed;
		}
		/* init i2c buses */
		amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		goto failed;
	}

	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	adev->accel_working = false;
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the device into a low power state, false to reset it
 * @fbcon: suspend the fbdev console if true
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, false);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	if (adev->is_atom_fw)
		amdgpu_atomfirmware_scratch_regs_save(adev);
	else
		amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true if the pci device needs to be re-enabled first
 * @fbcon: resume the fbdev console if true
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r) {
			if (fbcon)
				console_unlock();
			return r;
		}
	}
	if (adev->is_atom_fw)
		amdgpu_atomfirmware_scratch_regs_restore(adev);
	else
		amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r)
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);

	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r) {
		if (fbcon)
			console_unlock();
		return r;
	}

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon) {
		amdgpu_fbdev_set_suspend(adev, 0);
		console_unlock();
	}

	return 0;
}

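/*
 * GPU hang handling: amdgpu_check_soft_reset() asks each IP block whether
 * it is hung, then amdgpu_pre_soft_reset(), amdgpu_soft_reset() and
 * amdgpu_post_soft_reset() run the per-block soft-reset hooks on the hung
 * blocks.  amdgpu_need_full_reset() upgrades to a full ASIC reset when a
 * core block (GMC, SMC, ACP or DCE) is among the hung ones.
 */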
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0 ? true : false;
}

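/*
 * amdgpu_recover_vram_from_shadow - restore a VRAM BO from its GTT shadow
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the restore copy on
 * @bo: buffer object to restore
 * @fence: fence for the copy, returned to the caller to wait on
 *
 * After a full GPU reset VRAM contents are lost; if the BO has a shadow
 * in GTT and still lives in VRAM, schedule a copy back from the shadow
 * and hand the resulting fence to the caller.
 */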
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @voluntary: if this reset is requested by guest
 *             (true means by guest and false means by HYPERVISOR)
 *
 * Attempt to reset the GPU if it has hung (all asics),
 * for the SRIOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
{
	int i, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}

	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	/* request to take full control of GPU before re-initialization */
	if (voluntary)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need recover gart prior to run SMC/CP/SDMA resume */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->sched.thread)
			continue;

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset;

	if (amdgpu_sriov_vf(adev))
		return amdgpu_sriov_gpu_reset(adev, true);

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		/* Disable fb access */
		if (adev->mode_info.num_crtc) {
			struct amdgpu_mode_mc_save save;
			amdgpu_display_stop_mc_access(adev, &save);
			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
		}
		if (adev->is_atom_fw)
			amdgpu_atomfirmware_scratch_regs_save(adev);
		else
			amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		if (adev->is_atom_fw)
			amdgpu_atomfirmware_scratch_regs_restore(adev);
		else
			amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume(adev);
		}
	}
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		if (need_full_reset && amdgpu_need_backup(adev)) {
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				DRM_ERROR("gart recovery failed!!!\n");
		}
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/*
		 * recover vm page tables, since we cannot depend on VRAM being
		 * consistent after a gpu full reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (!ring)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i]) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	return r;
}

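/*
 * amdgpu_get_pcie_info - fill in the PCIe gen/width capability masks
 *
 * @adev: amdgpu_device pointer
 *
 * Honors the amdgpu_pcie_gen_cap/amdgpu_pcie_lane_cap module overrides,
 * falls back to safe defaults on root-bus (APU) devices, and otherwise
 * derives the masks from the speed and link-width capabilities reported
 * by the DRM PCIe helpers.
 */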
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

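/*
 * The register debugfs files encode more than a plain byte offset in the
 * file position (decoded below):
 *   bits  0..21  byte offset of the register to access (reg index << 2)
 *   bit     23   take the PM power-gating lock around the access
 *   bits 24..33  SE bank       (0x3FF selects all SEs)
 *   bits 34..43  SH bank       (0x3FF selects all SHs)
 *   bits 44..53  instance bank (0x3FF selects all instances)
 *   bit     62   apply the SE/SH/instance bank selection above
 */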
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

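/*
 * Write-side counterpart of amdgpu_debugfs_regs_read(); the file position
 * uses the same bank/PG-lock encoding described above.
 */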
2898static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2899 size_t size, loff_t *pos)
2900{
Al Viro45063092016-12-04 18:24:56 -05002901 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002902 ssize_t result = 0;
2903 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04002904 bool pm_pg_lock, use_bank;
2905 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002906
2907 if (size & 0x3 || *pos & 0x3)
2908 return -EINVAL;
2909
Tom St Denis394fdde2016-10-10 07:31:23 -04002910 /* are we reading registers for which a PG lock is necessary? */
2911 pm_pg_lock = (*pos >> 23) & 1;
2912
2913 if (*pos & (1ULL << 62)) {
2914 se_bank = (*pos >> 24) & 0x3FF;
2915 sh_bank = (*pos >> 34) & 0x3FF;
2916 instance_bank = (*pos >> 44) & 0x3FF;
2917
2918 if (se_bank == 0x3FF)
2919 se_bank = 0xFFFFFFFF;
2920 if (sh_bank == 0x3FF)
2921 sh_bank = 0xFFFFFFFF;
2922 if (instance_bank == 0x3FF)
2923 instance_bank = 0xFFFFFFFF;
2924 use_bank = 1;
2925 } else {
2926 use_bank = 0;
2927 }
2928
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04002929 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04002930
2931 if (use_bank) {
2932 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2933 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2934 return -EINVAL;
2935 mutex_lock(&adev->grbm_idx_mutex);
2936 amdgpu_gfx_select_se_sh(adev, se_bank,
2937 sh_bank, instance_bank);
2938 }
2939
2940 if (pm_pg_lock)
2941 mutex_lock(&adev->pm.mutex);
2942
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002943 while (size) {
2944 uint32_t value;
2945
2946 if (*pos > adev->rmmio_size)
2947 return result;
2948
2949 r = get_user(value, (uint32_t *)buf);
2950 if (r)
2951 return r;
2952
2953 WREG32(*pos >> 2, value);
2954
2955 result += 4;
2956 buf += 4;
2957 *pos += 4;
2958 size -= 4;
2959 }
2960
Tom St Denis394fdde2016-10-10 07:31:23 -04002961 if (use_bank) {
2962 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2963 mutex_unlock(&adev->grbm_idx_mutex);
2964 }
2965
2966 if (pm_pg_lock)
2967 mutex_unlock(&adev->pm.mutex);
2968
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002969 return result;
2970}
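
/*
 * Example (userspace sketch, not part of this file): read register 0x2004
 * on SE 1 with SH and instance broadcast, using the offset encoding
 * documented above. The register offset and SE count are assumptions for
 * illustration; debugfs is assumed mounted at /sys/kernel/debug, the
 * device is DRM minor 0, and the program is built with
 * _FILE_OFFSET_BITS=64 so the high offset bits survive.
 *
 *	uint64_t pos = 0x2004;			// register offset, in bytes
 *	pos |= 1ULL << 62;			// enable bank selection
 *	pos |= (uint64_t)1 << 24;		// se_bank = 1
 *	pos |= (uint64_t)0x3FF << 34;		// sh_bank = broadcast
 *	pos |= (uint64_t)0x3FF << 44;		// instance = broadcast
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
 *	uint32_t val;
 *	pread(fd, &val, sizeof(val), pos);
 */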

static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		/* SMC registers are addressed by byte offset directly,
		 * unlike the dword-indexed accessors above */
		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		/* byte-addressed, as in the SMC read path above */
		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

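/*
 * amdgpu_sensors: each dword of file offset selects one sensor
 * (idx = *pos >> 2); the sensor value(s) come back as dwords from the
 * powerplay/dpm read_sensor callback.
 */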
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}

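/*
 * amdgpu_wave: wave status dump. Offset layout, inferred from the decode
 * below (a sketch, not ABI documentation):
 *
 *   bits  0..6   byte offset into the returned dump (dword aligned)
 *   bits  7..14  SE index
 *   bits 15..22  SH index
 *   bits 23..30  CU index
 *   bits 31..36  wave id (6 bits, so it cannot overlap the simd field)
 *   bits 37..44  SIMD id
 */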
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	/* wave is a 6-bit field; a wider mask would overlap the simd bits */
	wave = ((*pos >> 31) & 0x3F);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}

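/*
 * amdgpu_gpr: wave GPR dump. Offset layout, inferred from the decode
 * below (a sketch, not ABI documentation):
 *
 *   bits  0..11  register offset, in dwords
 *   bits 12..19  SE index
 *   bits 20..27  SH index
 *   bits 28..35  CU index
 *   bits 36..43  wave id
 *   bits 44..51  SIMD id
 *   bits 52..59  thread (used for VGPRs)
 *   bit      60  bank: 0 = VGPRs, 1 = SGPRs
 */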
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		/* the read_wave_*gprs callbacks fill data[] starting at
		 * index 0, so index by how much has been copied out, not
		 * by the decoded register offset */
		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

/* must stay in the same order as the debugfs_regs[] table above */
static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
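
/*
 * These nodes are created under the primary DRM minor's debugfs root,
 * typically /sys/kernel/debug/dri/<minor>/ when debugfs is mounted in
 * the usual place.
 */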

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the entries created so far; note the
			 * inner loop must index with j, not i */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif