/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_pm.h"

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

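/*
 * Note: most driver code does not call the mm_rreg/mm_wreg helpers above
 * directly; register access normally goes through wrapper macros defined in
 * amdgpu.h (such as RREG32()/WREG32() and, presumably, their _IDX/_NO_KIQ
 * variants), which fill in the appropriate acc_flags. Registers beyond the
 * mapped MMIO window fall back to the indexed MM_INDEX/MM_DATA pair.
 */
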
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

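/*
 * Illustration (hypothetical register/mask values): a golden register list
 * is a flat array of {reg, and_mask, or_mask} triplets, e.g.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmMM_INDEX, 0x0000000f, 0x00000002,	// RMW: clear low nibble, set bit 1
 *		mmMM_DATA,  0xffffffff, 0x12345678,	// full mask: direct write
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */
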
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

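/*
 * Worked example (hypothetical sizes): with an 8MB doorbell BAR and
 * num_doorbells = 0x400, amdgpu keeps bytes [0, 0x1000) for its own rings
 * and amdkfd is told to allocate its queue doorbells from offset 0x1000
 * up to the end of the 8MB aperture.
 */
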
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

557/**
Ken Wang70142852016-03-18 15:08:49 +0800558 * amdgpu_wb_get_64bit - Allocate a wb entry
559 *
560 * @adev: amdgpu_device pointer
561 * @wb: wb index
562 *
563 * Allocate a wb slot for use by the driver (all asics).
564 * Returns 0 on success or -EINVAL on failure.
565 */
566int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
567{
568 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
569 adev->wb.num_wb, 0, 2, 7, 0);
570 if ((offset + 1) < adev->wb.num_wb) {
571 __set_bit(offset, adev->wb.used);
572 __set_bit(offset + 1, adev->wb.used);
573 *wb = offset;
574 return 0;
575 } else {
576 return -EINVAL;
577 }
578}
579
/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a 64-bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free the pair of wb slots allocated for a 64-bit value (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in a case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

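/*
 * Worked example (hypothetical sizes): base = 0, mc_vram_size = 8GB and a
 * 40-bit mc_mask give vram_start = 0x0 and vram_end = 0x1FFFFFFFF; with
 * amdgpu_vram_limit = 4096 (MB), real_vram_size is clamped to 4GB while the
 * programmed MC range still covers the full 8GB.
 */
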
/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

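/*
 * Sketch of a resulting layout (hypothetical sizes), with GTT placed after
 * VRAM because the space above vram_end is larger than the space below
 * vram_start:
 *
 *	0x000000000 - 0x1FFFFFFFF  VRAM (8GB)
 *	0x200000000 - 0x2FFFFFFFF  GTT  (4GB, aligned to gtt_base_align)
 */
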
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if (reg)
		return false;

	return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * some old SMC firmware still hangs the GPU after a VM reboot
		 * unless the driver performs vPost. SMC firmware versions
		 * 22.15 and above don't have this flaw, so force vPost only
		 * for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

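/*
 * Quick sanity check of the bit trick above: clearing the lowest set bit of
 * a power of two leaves zero, e.g. 8 & 7 == 0b1000 & 0b0111 == 0, while
 * 6 & 5 == 0b110 & 0b101 == 0b100 != 0. Note that 0 also passes this test.
 */
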
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * The max GPUVM size for Cayman, SI and CI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;
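		/*
		 * Worked example: amdgpu_vm_size = 8 (GB) covers 2^21 4K
		 * pages, so bits = ilog2(8) + 18 = 21 and the block size is
		 * 21 - 9 = 12: each page table maps 2^12 pages (16MB) and
		 * the page directory holds 2^9 = 512 entries, i.e. 4K.
		 */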

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

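/*
 * Usage sketch (hypothetical version numbers): a caller that needs at least
 * GMC v8.1 could gate a feature with
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC, 8, 1) == 0)
 *		... take the v8.1+ path ...
 */
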
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	amdgpu_dpm_enable_uvd(adev, false);
	amdgpu_dpm_enable_vce(adev, false);

	return 0;
}

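/**
 * amdgpu_fini - tear down all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Shuts down the SMC first, then walks the remaining IP blocks in
 * reverse order through hw_fini, sw_fini and late_fini.  For SR-IOV
 * the static CSA is freed and full GPU control is released at the end.
 * Returns 0; individual hw_fini/sw_fini errors are only logged.
 */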
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}

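/**
 * amdgpu_suspend - suspend all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Ungates the SMC and then every other block first so that suspend can
 * shut them down cleanly, then calls the suspend callback of each valid
 * IP block in reverse order.  Always returns 0; errors are only logged.
 */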
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

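/**
 * amdgpu_sriov_reinit_early - re-init the early IP blocks after a VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs hw_init for the COMMON, GMC and IH blocks only; these have to
 * be back up before the GART can be recovered and the SMC/CP/SDMA
 * blocks resumed.
 */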
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);

		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

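/**
 * amdgpu_sriov_reinit_late - re-init the remaining IP blocks after a VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs hw_init for every valid IP block that was not already handled
 * by amdgpu_sriov_reinit_early(), i.e. everything but COMMON, GMC and IH.
 */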
static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

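/**
 * amdgpu_resume - resume all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls the resume callback of every valid IP block in init order.
 * Returns 0 for success or the first error encountered.
 */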
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

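/**
 * amdgpu_device_detect_sriov_bios - check for an SR-IOV capable vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Flags the device as having an SR-IOV vbios if the atombios image
 * carries a GPU virtualization table.
 */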
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gtt_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can call these functions without locking issues */
	mutex_init(&adev->vm_manager.lock);
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL)
		return -ENOMEM;
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	/* Initialize clocks */
	r = amdgpu_atombios_get_clock_info(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
		goto failed;
	}
	/* init i2c buses */
	amdgpu_atombios_i2c_init(adev);

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		goto failed;
	}

	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	adev->accel_working = false;
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the device into a low power (D3hot) state
 * @fbcon: true to suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL)
			continue;
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, false);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true if the PCI device needs to be re-enabled first
 * @fbcon: true to resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r) {
			if (fbcon)
				console_unlock();
			return r;
		}
	}
	amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r)
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);

	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r) {
		if (fbcon)
			console_unlock();
		return r;
	}

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon) {
		amdgpu_fbdev_set_suspend(adev, 0);
		console_unlock();
	}

	return 0;
}

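/**
 * amdgpu_check_soft_reset - check which IP blocks are hung
 *
 * @adev: amdgpu_device pointer
 *
 * Asks each valid IP block whether it is hung via its check_soft_reset
 * callback and latches the result in the block's status.hang flag.
 * Returns true if any block reported a hang.
 */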
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

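/**
 * amdgpu_pre_soft_reset - prepare hung IP blocks for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Calls the pre_soft_reset callback of every valid IP block that was
 * flagged as hung.  Returns 0 for success or the first error.
 */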
static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

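/**
 * amdgpu_need_full_reset - check whether a full (ASIC) reset is required
 *
 * @adev: amdgpu_device pointer
 *
 * A hang in the GMC, SMC, ACP or DCE block cannot be recovered with a
 * soft reset, so a hang in any of them forces a full reset.
 */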
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

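/**
 * amdgpu_soft_reset - soft-reset the hung IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls the soft_reset callback of every valid IP block flagged as hung.
 * Returns 0 for success or the first error.
 */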
static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

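/**
 * amdgpu_post_soft_reset - clean up after a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Calls the post_soft_reset callback of every valid IP block flagged as
 * hung.  Returns 0 for success or the first error.
 */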
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

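/**
 * amdgpu_need_backup - check whether VRAM shadow copies are needed
 *
 * @adev: amdgpu_device pointer
 *
 * APUs use system memory rather than dedicated VRAM, so there is
 * nothing to shadow; dGPUs need backup whenever a lockup timeout is
 * configured (i.e. GPU recovery is enabled).
 */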
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0;
}

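/**
 * amdgpu_recover_vram_from_shadow - restore one BO from its shadow copy
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the copy on
 * @bo: buffer object to restore
 * @fence: returned fence for the copy job
 *
 * Copies the shadow of @bo back into VRAM, unless the BO has no shadow
 * or has been evicted from VRAM, in which case there is nothing to
 * recover.
 */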
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @voluntary: if this reset is requested by the guest
 *             (true means by the guest, false means by the hypervisor)
 *
 * Attempt to reset the GPU if it has hung (all asics),
 * for the SR-IOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
{
	int i, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}

	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	/* request to take full control of GPU before re-initialization */
	if (voluntary)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover the gart prior to running SMC/CP/SDMA resume */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->sched.thread)
			continue;

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset;

	if (amdgpu_sriov_vf(adev))
		return amdgpu_sriov_gpu_reset(adev, true);

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		/* Disable fb access */
		if (adev->mode_info.num_crtc) {
			struct amdgpu_mode_mc_save save;
			amdgpu_display_stop_mc_access(adev, &save);
			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
		}
		amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume(adev);
		}
	}
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		if (need_full_reset && amdgpu_need_backup(adev)) {
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				DRM_ERROR("gart recovery failed!!!\n");
		}
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/*
		 * recover vm page tables, since we cannot rely on VRAM being
		 * consistent after a gpu full reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (!ring)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i])
				kthread_unpark(adev->rings[i]->sched.thread);
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	return r;
}

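/**
 * amdgpu_get_pcie_info - determine the PCIe gen and lane width masks
 *
 * @adev: amdgpu_device pointer
 *
 * Fills adev->pm.pcie_gen_mask and adev->pm.pcie_mlw_mask, preferring
 * the module parameter overrides, then the caps reported by the DRM
 * PCIe helpers, and finally the driver defaults.  Devices on the root
 * bus (which covers APUs) always get the defaults.
 */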
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

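/*
 * The register debugfs files overload the file offset to carry more
 * than the plain MMIO offset; as decoded below, the layout is:
 *   bits  0..21: register offset in bytes (dword aligned)
 *   bit      23: take the PM power-gating lock around the access
 *   bits 24..33: SE bank select (0x3FF means all/broadcast)
 *   bits 34..43: SH bank select (0x3FF means all/broadcast)
 *   bits 44..53: instance bank select (0x3FF means all/broadcast)
 *   bit      62: the bank selection above is valid
 */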
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		/* bail out through the unlock path so we never leak the
		 * grbm_idx or pm mutexes taken above */
		if (*pos > adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
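
/*
 * A minimal user-space sketch of driving these files (the debugfs path
 * and the register offset are illustrative assumptions, not defined in
 * this file):
 *
 *   int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDWR);
 *   uint32_t val;
 *   pread(fd, &val, 4, reg_byte_offset);   // read one register
 *   pwrite(fd, &val, 4, reg_byte_offset);  // write it back
 *
 * Accesses must be 4-byte sized and 4-byte aligned or the handlers
 * above return -EINVAL.
 */
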
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
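
/*
 * RREG32_PCIE()/WREG32_PCIE() dispatch through adev->pcie_rreg/pcie_wreg,
 * which on most ASICs are indirect index/data register accesses taken
 * under a spinlock rather than plain MMIO; that is why no range check
 * against rmmio_size is performed here.
 */
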
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
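
/*
 * Like the PCIE file, the DIDT accessors (RREG32_DIDT/WREG32_DIDT) go
 * through per-ASIC indirect index/data callbacks, so the offset here is
 * an index into the DIDT register space rather than the MMIO aperture.
 */
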
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
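
/*
 * Note the asymmetry with the files above: the SMC accessors are passed
 * *pos directly instead of *pos >> 2, so amdgpu_regs_smc offsets address
 * the SMC space byte-wise rather than as dword register indices.
 */
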
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
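
/*
 * amdgpu_gca_config is a versioned, native-endian dword array: element 0
 * is the layout version (3 here) and each revision only appends fields,
 * so older readers keep working. A minimal reader sketch (field order as
 * built above; buffer size and names are the reader's own):
 *
 *   uint32_t cfg[64] = { 0 };
 *   pread(fd, cfg, sizeof(cfg), 0);
 *   printf("version %u, max shader engines %u\n", cfg[0], cfg[1]);
 */
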
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
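
/*
 * For amdgpu_sensors the file offset selects the sensor: idx = *pos >> 2
 * is handed to the powerplay (or legacy dpm) read_sensor() callback,
 * which returns one or more dwords and their total size. The sensor
 * numbering is defined by the powerplay interface, not by this file.
 */
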
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
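
/*
 * Offset packing for amdgpu_wave, matching the decode above: bits 0..6
 * are a byte offset into the returned wave-status block, then se at
 * bit 7, sh at 15, cu at 23, wave at 31 and simd at 37 (8-bit fields).
 * read_wave_data() is ASIC-specific; if the gfx backend does not
 * provide it, no data is produced and the read fails with -EINVAL.
 */
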
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		/* the callbacks above already applied the start offset, so
		 * consume the buffer from element 0 */
		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
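
/*
 * amdgpu_gpr uses a wider packing since GPR files are larger: a dword
 * offset in bits 0..11, 8-bit se/sh/cu/wave/simd/thread fields at shifts
 * 12/20/28/36/44/52, and bit 60 selecting SGPRs (1) over VGPRs (0).
 * The scratch buffer holds 1024 dwords, so offset + size / 4 should stay
 * within that bound.
 */
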
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
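
/*
 * debugfs_regs[] and debugfs_regs_names[] are parallel arrays walked
 * index-for-index by amdgpu_debugfs_regs_init() below; a new register
 * file must be added to both at the same position.
 */
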
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* tear down the files created so far */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}
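
/*
 * The files land in the DRM primary minor's debugfs directory, typically
 * /sys/kernel/debug/dri/<minor>/ (the exact path depends on where debugfs
 * is mounted). Only amdgpu_regs advertises a file size (rmmio_size); the
 * other files report zero but still accept positioned reads.
 */
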
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif