/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
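/*
 * Overview (inferred from the helpers below): registers whose byte offset
 * (reg * 4) falls inside the mapped MMIO BAR are accessed directly with
 * readl()/writel().  Larger offsets, or accesses with AMDGPU_REGS_IDX set,
 * go through the indirect mmMM_INDEX/mmMM_DATA pair under mmio_idx_lock.
 * When running as an SR-IOV guest at runtime, accesses are routed through
 * the KIQ helpers (amdgpu_virt_kiq_rreg/wreg) unless the caller passes
 * AMDGPU_REGS_NO_KIQ.
 */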
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
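
/*
 * Note that the 64-bit doorbell helpers above use atomic64_read()/atomic64_set()
 * on the mapped doorbell page rather than a readq()/writeq() pair, presumably
 * so the access stays a single 64-bit operation even on configurations without
 * native 64-bit MMIO accessors.
 */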

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
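
/*
 * Golden-register tables consumed by amdgpu_program_register_sequence() are
 * flat arrays of {offset, and_mask, or_mask} triples.  As an illustration
 * (mmFOO is a placeholder, not a real register): {mmFOO, 0xffffffff, 0x100}
 * overwrites the register with 0x100 outright, while {mmFOO, 0x0000ff00, 0x100}
 * clears bits 15:8 of the current value and then ORs in 0x100.
 */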

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);
	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}
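
/*
 * The 64-bit variant reserves two consecutive 32-bit writeback slots.  The
 * bitmap search above uses an alignment mask of 7, so the pair starts on an
 * 8-slot boundary, which also keeps the resulting GPU address 64-bit aligned.
 */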

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots of
 * ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
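
/*
 * GTT is placed in whichever gap around VRAM is larger: below VRAM (starting
 * at address 0) or above it.  As an illustrative example, with a 32-bit MC
 * address mask and VRAM occupying 0 .. 2GB-1, size_bf is 0 and size_af is
 * roughly 2GB, so GTT lands just above VRAM at the 2GB mark (rounded up to
 * gtt_base_align).
 */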

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;

}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old SMC firmware still needs the driver
		 * to do a vPost, otherwise the GPU hangs.  SMC firmware versions
		 * above 22.15 don't have this flaw, so force vPost only for SMC
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}
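
/*
 * Summary of the vPost decision above: an SR-IOV VF never posts the asic
 * (that is presumably handled on the host side), a passthrough FIJI may be
 * forced to post depending on its SMC firmware version, and everything else
 * falls back to amdgpu_need_post(), i.e. posting only when the asic does not
 * appear to be initialized.
 */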

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the mc register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}
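
/*
 * amdgpu_atombios_init() above wires the cail_* callbacks into the shared
 * ATOM interpreter.  On amdgpu the PLL and MC callbacks are stubs, MMIO goes
 * through the normal RREG32/WREG32 paths, and the I/O-port callbacks fall
 * back to MMIO when the device has no PCI I/O BAR.
 */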

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
					     enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
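
/*
 * Usage sketch (illustrative): amdgpu_ip_block_version_cmp(adev,
 * AMD_IP_BLOCK_TYPE_GMC, 8, 0) returns 0 when the asic carries a GMC block of
 * version 8.0 or newer and 1 when the block is older or absent, so a caller
 * can treat 0 as "feature available".
 */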

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const struct firmware *fw;
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = gpu_info_fw->gc_num_se;
		adev->gfx.config.max_cu_per_sh = gpu_info_fw->gc_num_cu_per_sh;
		adev->gfx.config.max_sh_per_se = gpu_info_fw->gc_num_sh_per_se;
		adev->gfx.config.max_backends_per_se = gpu_info_fw->gc_num_rb_per_se;
		adev->gfx.config.max_texture_channel_caches =
			gpu_info_fw->gc_num_tccs;
		adev->gfx.config.max_gprs = gpu_info_fw->gc_num_gprs;
		adev->gfx.config.max_gs_threads = gpu_info_fw->gc_num_max_gs_thds;
		adev->gfx.config.gs_vgt_table_depth = gpu_info_fw->gc_gs_table_depth;
		adev->gfx.config.gs_prim_buffer_depth = gpu_info_fw->gc_gsprim_buff_depth;
		adev->gfx.config.double_offchip_lds_buf =
			gpu_info_fw->gc_double_offchip_lds_buffer;
		adev->gfx.cu_info.wave_front_size = gpu_info_fw->gc_wave_size;
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	release_firmware(fw);
	fw = NULL;

	return err;
}
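
/*
 * The gpu_info binary parsed above is a common ucode header followed, at
 * ucode_array_offset_bytes, by a versioned payload; the v1.0 payload is a flat
 * table of GFX configuration limits (shader engines, CUs per SH, RBs, TCCs,
 * GS sizing, wavefront size) used to seed adev->gfx.config before the IP
 * blocks are initialized.
 */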

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
		adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

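/*
 * IP blocks come up in stages: early_init() above selects and validates the
 * per-asic block list, sw_init()/hw_init() below allocate software state and
 * program the hardware, and late_init() runs once everything else is up.  The
 * GMC block is hw-initialized ahead of the others so that GPU memory (VRAM
 * scratch, writeback pages, the SR-IOV CSA) can be allocated before the
 * remaining blocks start.
 */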
1590static int amdgpu_init(struct amdgpu_device *adev)
1591{
1592 int i, r;
1593
1594 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001595 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001596 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001597 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001598 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001599 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1600 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001601 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001602 }
Alex Deuchera1255102016-10-13 17:41:13 -04001603 adev->ip_blocks[i].status.sw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001604 /* need to do gmc hw init early so we can allocate gpu mem */
Alex Deuchera1255102016-10-13 17:41:13 -04001605 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001606 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001607 if (r) {
1608 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001609 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001610 }
Alex Deuchera1255102016-10-13 17:41:13 -04001611 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001612 if (r) {
1613 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001614 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001615 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001616 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001617 if (r) {
1618 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001619 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001620 }
Alex Deuchera1255102016-10-13 17:41:13 -04001621 adev->ip_blocks[i].status.hw = true;
Monk Liu24936642017-01-09 15:54:32 +08001622
1623 /* right after GMC hw init, we create CSA */
1624 if (amdgpu_sriov_vf(adev)) {
1625 r = amdgpu_allocate_static_csa(adev);
1626 if (r) {
1627 DRM_ERROR("allocate CSA failed %d\n", r);
1628 return r;
1629 }
1630 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001631 }
1632 }
1633
1634 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001635 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001636 continue;
1637 /* gmc hw init is done early */
Alex Deuchera1255102016-10-13 17:41:13 -04001638 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001639 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001640 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001641 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001642 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001644 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001645 }
Alex Deuchera1255102016-10-13 17:41:13 -04001646 adev->ip_blocks[i].status.hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001647 }
1648
1649 return 0;
1650}
1651
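/*
 * amdgpu_late_init() runs after the IB ring tests in amdgpu_device_init().
 * Besides calling each block's optional late_init hook, this is where
 * clockgating is enabled; UVD and VCE are skipped here because their gating
 * is handled specially by their own code.
 */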
1652static int amdgpu_late_init(struct amdgpu_device *adev)
1653{
1654 int i = 0, r;
1655
1656 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001657 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001658 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001659 if (adev->ip_blocks[i].version->funcs->late_init) {
1660 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001661 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001662 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1663 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001664 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001665 }
Alex Deuchera1255102016-10-13 17:41:13 -04001666 adev->ip_blocks[i].status.late_initialized = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001667 }
Alex Deucher4a446d52016-10-07 14:48:18 -04001668 /* skip CG for VCE/UVD, it's handled specially */
Alex Deuchera1255102016-10-13 17:41:13 -04001669 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1670 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
Alex Deucher4a446d52016-10-07 14:48:18 -04001671 /* enable clockgating to save power */
Alex Deuchera1255102016-10-13 17:41:13 -04001672 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1673 AMD_CG_STATE_GATE);
Alex Deucher4a446d52016-10-07 14:48:18 -04001674 if (r) {
1675 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001676 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher4a446d52016-10-07 14:48:18 -04001677 return r;
1678 }
Arindam Nathb0b00ff2016-10-07 19:01:37 +05301679 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001680 }
1681
1682 return 0;
1683}
1684
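/*
 * Teardown mirrors init in reverse: the SMC block is ungated and shut down
 * first, then hw_fini runs over the remaining blocks from last to first
 * (freeing writeback and VRAM scratch when the GMC block is reached),
 * followed by sw_fini and finally the optional late_fini hooks.
 */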
1685static int amdgpu_fini(struct amdgpu_device *adev)
1686{
1687 int i, r;
1688
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001689 /* need to disable SMC first */
1690 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001691 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001692 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001693 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001694 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001695 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1696 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001697 if (r) {
1698 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001699 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001700 return r;
1701 }
Alex Deuchera1255102016-10-13 17:41:13 -04001702 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001703 /* XXX handle errors */
1704 if (r) {
1705 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001706 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001707 }
Alex Deuchera1255102016-10-13 17:41:13 -04001708 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001709 break;
1710 }
1711 }
1712
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001713 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001714 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001715 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001716 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001717 amdgpu_wb_fini(adev);
1718 amdgpu_vram_scratch_fini(adev);
1719 }
Rex Zhu8201a672016-11-24 21:44:44 +08001720
1721 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1722 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1723 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1724 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1725 AMD_CG_STATE_UNGATE);
1726 if (r) {
1727 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1728 adev->ip_blocks[i].version->funcs->name, r);
1729 return r;
1730 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001731 }
Rex Zhu8201a672016-11-24 21:44:44 +08001732
Alex Deuchera1255102016-10-13 17:41:13 -04001733 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001734 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001735 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001736 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1737 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001738 }
Rex Zhu8201a672016-11-24 21:44:44 +08001739
Alex Deuchera1255102016-10-13 17:41:13 -04001740 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001741 }
1742
1743 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001744 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001745 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001746 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001747 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001748 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001749 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1750 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001751 }
Alex Deuchera1255102016-10-13 17:41:13 -04001752 adev->ip_blocks[i].status.sw = false;
1753 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001754 }
1755
Monk Liua6dcfd92016-05-19 14:36:34 +08001756 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001757 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001758 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001759 if (adev->ip_blocks[i].version->funcs->late_fini)
1760 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1761 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001762 }
1763
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001764 if (amdgpu_sriov_vf(adev)) {
Monk Liu24936642017-01-09 15:54:32 +08001765 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001766 amdgpu_virt_release_full_gpu(adev, false);
1767 }
Monk Liu24936642017-01-09 15:54:32 +08001768
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001769 return 0;
1770}
1771
Alex Deucherfaefba92016-12-06 10:38:29 -05001772int amdgpu_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001773{
1774 int i, r;
1775
Xiangliang Yue941ea92017-01-18 12:47:55 +08001776 if (amdgpu_sriov_vf(adev))
1777 amdgpu_virt_request_full_gpu(adev, false);
1778
Flora Cuic5a93a22016-02-26 10:45:25 +08001779 /* ungate SMC block first */
1780 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1781 AMD_CG_STATE_UNGATE);
1782 if (r) {
 1783		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1784 }
1785
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001786 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001787 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001788 continue;
1789 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001790 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
Alex Deuchera1255102016-10-13 17:41:13 -04001791 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1792 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001793 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001794 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1795 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001796 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001797 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001798 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001799 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001800 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001801 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001802 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1803 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001804 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001805 }
1806
Xiangliang Yue941ea92017-01-18 12:47:55 +08001807 if (amdgpu_sriov_vf(adev))
1808 amdgpu_virt_release_full_gpu(adev, false);
1809
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001810 return 0;
1811}
1812
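/*
 * The two SR-IOV reinit helpers below re-run hw_init in a fixed order given
 * by their ip_order[] tables rather than in block declaration order: the
 * "early" set (GMC, COMMON, GFXHUB, MMHUB, IH) must come back before the
 * GART is recovered, and the "late" set (SMC, DCE, GFX, SDMA, VCE) after it.
 * Blocks not listed in either table are left untouched by these helpers.
 */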
Monk Liue4f0fdc2017-02-09 11:55:49 +08001813static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001814{
1815 int i, r;
1816
Monk Liu2cb681b2017-04-26 12:00:49 +08001817 static enum amd_ip_block_type ip_order[] = {
1818 AMD_IP_BLOCK_TYPE_GMC,
1819 AMD_IP_BLOCK_TYPE_COMMON,
1820 AMD_IP_BLOCK_TYPE_GFXHUB,
1821 AMD_IP_BLOCK_TYPE_MMHUB,
1822 AMD_IP_BLOCK_TYPE_IH,
1823 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001824
Monk Liu2cb681b2017-04-26 12:00:49 +08001825 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1826 int j;
1827 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001828
Monk Liu2cb681b2017-04-26 12:00:49 +08001829 for (j = 0; j < adev->num_ip_blocks; j++) {
1830 block = &adev->ip_blocks[j];
1831
1832 if (block->version->type != ip_order[i] ||
1833 !block->status.valid)
1834 continue;
1835
1836 r = block->version->funcs->hw_init(adev);
 1837			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001838 }
1839 }
1840
1841 return 0;
1842}
1843
Monk Liue4f0fdc2017-02-09 11:55:49 +08001844static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001845{
1846 int i, r;
1847
Monk Liu2cb681b2017-04-26 12:00:49 +08001848 static enum amd_ip_block_type ip_order[] = {
1849 AMD_IP_BLOCK_TYPE_SMC,
1850 AMD_IP_BLOCK_TYPE_DCE,
1851 AMD_IP_BLOCK_TYPE_GFX,
1852 AMD_IP_BLOCK_TYPE_SDMA,
1853 AMD_IP_BLOCK_TYPE_VCE,
1854 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001855
Monk Liu2cb681b2017-04-26 12:00:49 +08001856 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1857 int j;
1858 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001859
Monk Liu2cb681b2017-04-26 12:00:49 +08001860 for (j = 0; j < adev->num_ip_blocks; j++) {
1861 block = &adev->ip_blocks[j];
1862
1863 if (block->version->type != ip_order[i] ||
1864 !block->status.valid)
1865 continue;
1866
1867 r = block->version->funcs->hw_init(adev);
 1868			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
Monk Liua90ad3c2017-01-23 14:22:08 +08001869 }
1870 }
1871
1872 return 0;
1873}
1874
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001875static int amdgpu_resume(struct amdgpu_device *adev)
1876{
1877 int i, r;
1878
1879 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001880 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001881 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001882 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001883 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001884 DRM_ERROR("resume of IP block <%s> failed %d\n",
1885 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001886 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001887 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001888 }
1889
1890 return 0;
1891}
1892
Monk Liu4e99a442016-03-31 13:26:59 +08001893static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04001894{
Alex Deuchera5bde2f2016-09-23 16:23:41 -04001895 if (adev->is_atom_fw) {
1896 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1897 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1898 } else {
1899 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1900 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1901 }
Andres Rodriguez048765a2016-06-11 02:51:32 -04001902}
1903
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001904/**
1905 * amdgpu_device_init - initialize the driver
1906 *
1907 * @adev: amdgpu_device pointer
 1908 * @ddev: drm dev pointer
1909 * @pdev: pci dev pointer
1910 * @flags: driver flags
1911 *
1912 * Initializes the driver info and hw (all asics).
1913 * Returns 0 for success or an error on failure.
1914 * Called at driver startup.
1915 */
1916int amdgpu_device_init(struct amdgpu_device *adev,
1917 struct drm_device *ddev,
1918 struct pci_dev *pdev,
1919 uint32_t flags)
1920{
1921 int r, i;
1922 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02001923 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001924
1925 adev->shutdown = false;
1926 adev->dev = &pdev->dev;
1927 adev->ddev = ddev;
1928 adev->pdev = pdev;
1929 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08001930 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001931 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1932 adev->mc.gtt_size = 512 * 1024 * 1024;
1933 adev->accel_working = false;
1934 adev->num_rings = 0;
1935 adev->mman.buffer_funcs = NULL;
1936 adev->mman.buffer_funcs_ring = NULL;
1937 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01001938 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001939 adev->gart.gart_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01001940 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001941
1942 adev->smc_rreg = &amdgpu_invalid_rreg;
1943 adev->smc_wreg = &amdgpu_invalid_wreg;
1944 adev->pcie_rreg = &amdgpu_invalid_rreg;
1945 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08001946 adev->pciep_rreg = &amdgpu_invalid_rreg;
1947 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001948 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1949 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1950 adev->didt_rreg = &amdgpu_invalid_rreg;
1951 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08001952 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1953 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001954 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1955 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1956
Rex Zhuccdbb202016-06-08 12:47:41 +08001957
Alex Deucher3e39ab92015-06-05 15:04:33 -04001958 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1959 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1960 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001961
 1962 /* mutex initializations are all done here so we
 1963 * can call these functions again without locking issues */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001964 atomic_set(&adev->irq.ih.lock, 0);
Huang Rui0e5ca0d2017-03-03 18:37:23 -05001965 mutex_init(&adev->firmware.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001966 mutex_init(&adev->pm.mutex);
1967 mutex_init(&adev->gfx.gpu_clock_mutex);
1968 mutex_init(&adev->srbm_mutex);
1969 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001970 mutex_init(&adev->mn_lock);
1971 hash_init(adev->mn_hash);
1972
1973 amdgpu_check_arguments(adev);
1974
1975 /* Registers mapping */
1976 /* TODO: block userspace mapping of io register */
1977 spin_lock_init(&adev->mmio_idx_lock);
1978 spin_lock_init(&adev->smc_idx_lock);
1979 spin_lock_init(&adev->pcie_idx_lock);
1980 spin_lock_init(&adev->uvd_ctx_idx_lock);
1981 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08001982 spin_lock_init(&adev->gc_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001983 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02001984 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001985
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08001986 INIT_LIST_HEAD(&adev->shadow_list);
1987 mutex_init(&adev->shadow_list_lock);
1988
Chunming Zhou5c1354b2016-08-30 16:13:10 +08001989 INIT_LIST_HEAD(&adev->gtt_list);
1990 spin_lock_init(&adev->gtt_list_lock);
1991
Ken Wangda69c1612016-01-21 19:08:55 +08001992 if (adev->asic_type >= CHIP_BONAIRE) {
1993 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1994 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1995 } else {
1996 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1997 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1998 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08001999
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002000 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2001 if (adev->rmmio == NULL) {
2002 return -ENOMEM;
2003 }
2004 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2005 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2006
Ken Wangda69c1612016-01-21 19:08:55 +08002007 if (adev->asic_type >= CHIP_BONAIRE)
2008 /* doorbell bar mapping */
2009 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002010
2011 /* io port mapping */
2012 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2013 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2014 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2015 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2016 break;
2017 }
2018 }
2019 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05002020 DRM_INFO("PCI I/O BAR is not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002021
2022 /* early init functions */
2023 r = amdgpu_early_init(adev);
2024 if (r)
2025 return r;
2026
2027 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2028 /* this will fail for cards that aren't VGA class devices, just
2029 * ignore it */
2030 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2031
2032 if (amdgpu_runtime_pm == 1)
2033 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04002034 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002035 runtime = true;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002036 if (!pci_is_thunderbolt_attached(adev->pdev))
2037 vga_switcheroo_register_client(adev->pdev,
2038 &amdgpu_switcheroo_ops, runtime);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002039 if (runtime)
2040 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2041
2042 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04002043 if (!amdgpu_get_bios(adev)) {
2044 r = -EINVAL;
2045 goto failed;
2046 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01002047
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002048 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002049 if (r) {
2050 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002051 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002052 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002053
Monk Liu4e99a442016-03-31 13:26:59 +08002054 /* detect if we are running with an SR-IOV vBIOS */
2055 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04002056
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002057 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08002058 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002059 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08002060 dev_err(adev->dev, "no vBIOS found\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002061 r = -EINVAL;
2062 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002063 }
Monk Liubec86372016-09-14 19:38:08 +08002064 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08002065 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2066 if (r) {
2067 dev_err(adev->dev, "gpu post error!\n");
2068 goto failed;
2069 }
2070 } else {
2071 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002072 }
2073
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002074 if (!adev->is_atom_fw) {
2075 /* Initialize clocks */
2076 r = amdgpu_atombios_get_clock_info(adev);
2077 if (r) {
2078 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
 2079 goto failed;
2080 }
2081 /* init i2c buses */
2082 amdgpu_atombios_i2c_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002083 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002084
2085 /* Fence driver */
2086 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002087 if (r) {
2088 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002089 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002090 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002091
2092 /* init the mode config */
2093 drm_mode_config_init(adev->ddev);
2094
2095 r = amdgpu_init(adev);
2096 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05002097 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002098 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002099 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002100 }
2101
2102 adev->accel_working = true;
2103
Marek Olšák95844d22016-08-17 23:49:27 +02002104 /* Initialize the buffer migration limit. */
2105 if (amdgpu_moverate >= 0)
2106 max_MBps = amdgpu_moverate;
2107 else
2108 max_MBps = 8; /* Allow 8 MB/s. */
2109 /* Get a log2 for easy divisions. */
2110 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
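 /* e.g. the default 8 MB/s gives log2_max_MBps = ilog2(8) = 3 */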
2111
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002112 r = amdgpu_ib_pool_init(adev);
2113 if (r) {
2114 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002115 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002116 }
2117
2118 r = amdgpu_ib_ring_tests(adev);
2119 if (r)
2120 DRM_ERROR("ib ring test failed (%d).\n", r);
2121
Monk Liu9bc92b92017-02-08 17:38:13 +08002122 amdgpu_fbdev_init(adev);
2123
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002124 r = amdgpu_gem_debugfs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002125 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002126 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002127
2128 r = amdgpu_debugfs_regs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002129 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002130 DRM_ERROR("registering register debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002131
Huang Rui50ab2532016-06-12 15:51:09 +08002132 r = amdgpu_debugfs_firmware_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002133 if (r)
Huang Rui50ab2532016-06-12 15:51:09 +08002134 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
Huang Rui50ab2532016-06-12 15:51:09 +08002135
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002136 if ((amdgpu_testing & 1)) {
2137 if (adev->accel_working)
2138 amdgpu_test_moves(adev);
2139 else
2140 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2141 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002142 if (amdgpu_benchmarking) {
2143 if (adev->accel_working)
2144 amdgpu_benchmark(adev, amdgpu_benchmarking);
2145 else
2146 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2147 }
2148
2149 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2150 * explicit gating rather than handling it automatically.
2151 */
2152 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002153 if (r) {
2154 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002155 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002156 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002157
2158 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04002159
2160failed:
2161 if (runtime)
2162 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2163 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002164}
2165
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002166/**
2167 * amdgpu_device_fini - tear down the driver
2168 *
2169 * @adev: amdgpu_device pointer
2170 *
2171 * Tear down the driver info (all asics).
2172 * Called at driver shutdown.
2173 */
2174void amdgpu_device_fini(struct amdgpu_device *adev)
2175{
2176 int r;
2177
2178 DRM_INFO("amdgpu: finishing device.\n");
2179 adev->shutdown = true;
Pixel Dingdb2c2a92017-04-25 16:47:42 +08002180 if (adev->mode_info.mode_config_initialized)
2181 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002182 /* evict vram memory */
2183 amdgpu_bo_evict_vram(adev);
2184 amdgpu_ib_pool_fini(adev);
2185 amdgpu_fence_driver_fini(adev);
2186 amdgpu_fbdev_fini(adev);
2187 r = amdgpu_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002188 adev->accel_working = false;
2189 /* free i2c buses */
2190 amdgpu_i2c_fini(adev);
2191 amdgpu_atombios_fini(adev);
2192 kfree(adev->bios);
2193 adev->bios = NULL;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002194 if (!pci_is_thunderbolt_attached(adev->pdev))
2195 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002196 if (adev->flags & AMD_IS_PX)
2197 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002198 vga_client_register(adev->pdev, NULL, NULL, NULL);
2199 if (adev->rio_mem)
2200 pci_iounmap(adev->pdev, adev->rio_mem);
2201 adev->rio_mem = NULL;
2202 iounmap(adev->rmmio);
2203 adev->rmmio = NULL;
Ken Wangda69c1612016-01-21 19:08:55 +08002204 if (adev->asic_type >= CHIP_BONAIRE)
2205 amdgpu_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002206 amdgpu_debugfs_regs_cleanup(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002207}
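
/*
 * Rough call-flow sketch (for orientation only; the exact callers live in
 * the KMS layer, e.g. amdgpu_kms.c, and may differ between kernel versions):
 *
 *	amdgpu_driver_load_kms()   -> amdgpu_device_init(adev, ddev, pdev, flags)
 *	amdgpu_driver_unload_kms() -> amdgpu_device_fini(adev)
 */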
2208
2209
2210/*
2211 * Suspend & resume.
2212 */
2213/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002214 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002215 *
 2216 * @dev: drm dev pointer
 2217 * @suspend: true to also put the PCI device into D3hot, false to reset the asic instead
 * @fbcon: suspend the fbdev console as well
2218 *
2219 * Puts the hw in the suspend state (all asics).
2220 * Returns 0 for success or an error on failure.
2221 * Called at driver suspend.
2222 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002223int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002224{
2225 struct amdgpu_device *adev;
2226 struct drm_crtc *crtc;
2227 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002228 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002229
2230 if (dev == NULL || dev->dev_private == NULL) {
2231 return -ENODEV;
2232 }
2233
2234 adev = dev->dev_private;
2235
2236 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2237 return 0;
2238
2239 drm_kms_helper_poll_disable(dev);
2240
2241 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002242 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002243 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2244 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2245 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002246 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002247
Alex Deucher756e6882015-10-08 00:03:36 -04002248 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002249 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04002250 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002251 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2252 struct amdgpu_bo *robj;
2253
Alex Deucher756e6882015-10-08 00:03:36 -04002254 if (amdgpu_crtc->cursor_bo) {
2255 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002256 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002257 if (r == 0) {
2258 amdgpu_bo_unpin(aobj);
2259 amdgpu_bo_unreserve(aobj);
2260 }
2261 }
2262
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002263 if (rfb == NULL || rfb->obj == NULL) {
2264 continue;
2265 }
2266 robj = gem_to_amdgpu_bo(rfb->obj);
2267 /* don't unpin kernel fb objects */
2268 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
Alex Xie7a6901d2017-04-24 13:52:41 -04002269 r = amdgpu_bo_reserve(robj, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002270 if (r == 0) {
2271 amdgpu_bo_unpin(robj);
2272 amdgpu_bo_unreserve(robj);
2273 }
2274 }
2275 }
2276 /* evict vram memory */
2277 amdgpu_bo_evict_vram(adev);
2278
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002279 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002280
2281 r = amdgpu_suspend(adev);
2282
Alex Deuchera0a71e42016-10-10 12:41:36 -04002283 /* evict remaining vram memory
2284 * This second call to evict vram is to evict the gart page table
2285 * using the CPU.
2286 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002287 amdgpu_bo_evict_vram(adev);
2288
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002289 if (adev->is_atom_fw)
2290 amdgpu_atomfirmware_scratch_regs_save(adev);
2291 else
2292 amdgpu_atombios_scratch_regs_save(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002293 pci_save_state(dev->pdev);
2294 if (suspend) {
2295 /* Shut down the device */
2296 pci_disable_device(dev->pdev);
2297 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002298 } else {
2299 r = amdgpu_asic_reset(adev);
2300 if (r)
2301 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002302 }
2303
2304 if (fbcon) {
2305 console_lock();
2306 amdgpu_fbdev_set_suspend(adev, 1);
2307 console_unlock();
2308 }
2309 return 0;
2310}
2311
2312/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002313 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002314 *
 2315 * @dev: drm dev pointer
 * @resume: true if the PCI device needs to be re-enabled and powered back up
 * @fbcon: resume the fbdev console as well
2316 *
2317 * Bring the hw back to operating state (all asics).
2318 * Returns 0 for success or an error on failure.
2319 * Called at driver resume.
2320 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002321int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002322{
2323 struct drm_connector *connector;
2324 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002325 struct drm_crtc *crtc;
Huang Rui03161a62017-04-13 16:12:26 +08002326 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002327
2328 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2329 return 0;
2330
jimqu74b0b152016-09-07 17:09:12 +08002331 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002332 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002333
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002334 if (resume) {
2335 pci_set_power_state(dev->pdev, PCI_D0);
2336 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002337 r = pci_enable_device(dev->pdev);
Huang Rui03161a62017-04-13 16:12:26 +08002338 if (r)
2339 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002340 }
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002341 if (adev->is_atom_fw)
2342 amdgpu_atomfirmware_scratch_regs_restore(adev);
2343 else
2344 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002345
2346 /* post card */
Jim Quc836fec2017-02-10 15:59:59 +08002347 if (amdgpu_need_post(adev)) {
jimqu74b0b152016-09-07 17:09:12 +08002348 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2349 if (r)
2350 DRM_ERROR("amdgpu asic init failed\n");
2351 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002352
2353 r = amdgpu_resume(adev);
Rex Zhue6707212017-03-30 13:21:01 +08002354 if (r) {
Flora Cuica198522016-02-04 15:10:08 +08002355 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Huang Rui03161a62017-04-13 16:12:26 +08002356 goto unlock;
Rex Zhue6707212017-03-30 13:21:01 +08002357 }
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002358 amdgpu_fence_driver_resume(adev);
2359
Flora Cuica198522016-02-04 15:10:08 +08002360 if (resume) {
2361 r = amdgpu_ib_ring_tests(adev);
2362 if (r)
2363 DRM_ERROR("ib ring test failed (%d).\n", r);
2364 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002365
2366 r = amdgpu_late_init(adev);
Huang Rui03161a62017-04-13 16:12:26 +08002367 if (r)
2368 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002369
Alex Deucher756e6882015-10-08 00:03:36 -04002370 /* pin cursors */
2371 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2372 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2373
2374 if (amdgpu_crtc->cursor_bo) {
2375 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002376 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002377 if (r == 0) {
2378 r = amdgpu_bo_pin(aobj,
2379 AMDGPU_GEM_DOMAIN_VRAM,
2380 &amdgpu_crtc->cursor_addr);
2381 if (r != 0)
2382 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2383 amdgpu_bo_unreserve(aobj);
2384 }
2385 }
2386 }
2387
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002388 /* blat the mode back in */
2389 if (fbcon) {
2390 drm_helper_resume_force_mode(dev);
2391 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002392 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002393 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2394 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2395 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002396 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002397 }
2398
2399 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002400
2401 /*
2402 * Most of the connector probing functions try to acquire runtime pm
2403 * refs to ensure that the GPU is powered on when connector polling is
2404 * performed. Since we're calling this from a runtime PM callback,
2405 * trying to acquire rpm refs will cause us to deadlock.
2406 *
2407 * Since we're guaranteed to be holding the rpm lock, it's safe to
2408 * temporarily disable the rpm helpers so this doesn't deadlock us.
2409 */
2410#ifdef CONFIG_PM
2411 dev->dev->power.disable_depth++;
2412#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002413 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002414#ifdef CONFIG_PM
2415 dev->dev->power.disable_depth--;
2416#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002417
Huang Rui03161a62017-04-13 16:12:26 +08002418 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002419 amdgpu_fbdev_set_suspend(adev, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002420
Huang Rui03161a62017-04-13 16:12:26 +08002421unlock:
2422 if (fbcon)
2423 console_unlock();
2424
2425 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002426}
2427
Chunming Zhou63fbf422016-07-15 11:19:20 +08002428static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2429{
2430 int i;
2431 bool asic_hang = false;
2432
2433 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002434 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002435 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002436 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2437 adev->ip_blocks[i].status.hang =
2438 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2439 if (adev->ip_blocks[i].status.hang) {
2440 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002441 asic_hang = true;
2442 }
2443 }
2444 return asic_hang;
2445}
2446
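/*
 * Reset helper flow: amdgpu_check_soft_reset() above latches a per-block
 * "hang" flag via each block's check_soft_reset hook.  The helpers below
 * (pre_soft_reset/soft_reset/post_soft_reset) only act on blocks with that
 * flag set, while amdgpu_need_full_reset() escalates to a full ASIC reset
 * when GMC, SMC, ACP or DCE are among the hung blocks.
 */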
Baoyou Xie4d446652016-09-18 22:09:35 +08002447static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002448{
2449 int i, r = 0;
2450
2451 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002452 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002453 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002454 if (adev->ip_blocks[i].status.hang &&
2455 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2456 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002457 if (r)
2458 return r;
2459 }
2460 }
2461
2462 return 0;
2463}
2464
Chunming Zhou35d782f2016-07-15 15:57:13 +08002465static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2466{
Alex Deucherda146d32016-10-13 16:07:03 -04002467 int i;
2468
2469 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002470 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002471 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002472 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2473 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2474 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2475 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2476 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002477 DRM_INFO("Some blocks need a full reset!\n");
2478 return true;
2479 }
2480 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002481 }
2482 return false;
2483}
2484
2485static int amdgpu_soft_reset(struct amdgpu_device *adev)
2486{
2487 int i, r = 0;
2488
2489 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002490 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002491 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002492 if (adev->ip_blocks[i].status.hang &&
2493 adev->ip_blocks[i].version->funcs->soft_reset) {
2494 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002495 if (r)
2496 return r;
2497 }
2498 }
2499
2500 return 0;
2501}
2502
2503static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2504{
2505 int i, r = 0;
2506
2507 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002508 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002509 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002510 if (adev->ip_blocks[i].status.hang &&
2511 adev->ip_blocks[i].version->funcs->post_soft_reset)
2512 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002513 if (r)
2514 return r;
2515 }
2516
2517 return 0;
2518}
2519
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002520bool amdgpu_need_backup(struct amdgpu_device *adev)
2521{
2522 if (adev->flags & AMD_IS_APU)
2523 return false;
2524
 2525 return amdgpu_lockup_timeout > 0;
2526}
2527
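/*
 * Shadow recovery: page-table BOs placed in VRAM can keep a GTT "shadow"
 * copy (tracked on adev->shadow_list).  After a full reset the VRAM contents
 * cannot be trusted, so the helper below validates the shadow and copies it
 * back into VRAM through the SDMA buffer-funcs ring, returning a fence the
 * caller waits on.
 */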
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002528static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2529 struct amdgpu_ring *ring,
2530 struct amdgpu_bo *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +01002531 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002532{
2533 uint32_t domain;
2534 int r;
2535
Roger.He23d2e502017-04-21 14:24:26 +08002536 if (!bo->shadow)
2537 return 0;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002538
Alex Xie1d284792017-04-24 13:53:04 -04002539 r = amdgpu_bo_reserve(bo, true);
Roger.He23d2e502017-04-21 14:24:26 +08002540 if (r)
2541 return r;
2542 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2543 /* if bo has been evicted, then no need to recover */
2544 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
Roger.He82521312017-04-21 13:08:43 +08002545 r = amdgpu_bo_validate(bo->shadow);
2546 if (r) {
2547 DRM_ERROR("bo validate failed!\n");
2548 goto err;
2549 }
2550
2551 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2552 if (r) {
2553 DRM_ERROR("%p bind failed\n", bo->shadow);
2554 goto err;
2555 }
2556
Roger.He23d2e502017-04-21 14:24:26 +08002557 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002558 NULL, fence, true);
Roger.He23d2e502017-04-21 14:24:26 +08002559 if (r) {
2560 DRM_ERROR("recover page table failed!\n");
2561 goto err;
2562 }
2563 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002564err:
Roger.He23d2e502017-04-21 14:24:26 +08002565 amdgpu_bo_unreserve(bo);
2566 return r;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002567}
2568
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002569/**
Monk Liua90ad3c2017-01-23 14:22:08 +08002570 * amdgpu_sriov_gpu_reset - reset the asic
2571 *
2572 * @adev: amdgpu device pointer
 2573 * @voluntary: true if this reset is requested by the guest,
 2574 *             false if it is triggered by the hypervisor
 2575 *
 2576 * Attempt to reset the GPU if it has hung (all asics),
 2577 * for the SR-IOV case.
2578 * Returns 0 for success or an error on failure.
2579 */
2580int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2581{
2582 int i, r = 0;
2583 int resched;
2584 struct amdgpu_bo *bo, *tmp;
2585 struct amdgpu_ring *ring;
2586 struct dma_fence *fence = NULL, *next = NULL;
2587
Monk Liu147b5982017-01-25 15:48:01 +08002588 mutex_lock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002589 atomic_inc(&adev->gpu_reset_counter);
Monk Liu1fb37a32017-01-26 15:36:37 +08002590 adev->gfx.in_reset = true;
Monk Liua90ad3c2017-01-23 14:22:08 +08002591
2592 /* block TTM */
2593 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2594
2595 /* block scheduler */
2596 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2597 ring = adev->rings[i];
2598
2599 if (!ring || !ring->sched.thread)
2600 continue;
2601
2602 kthread_park(ring->sched.thread);
2603 amd_sched_hw_job_reset(&ring->sched);
2604 }
2605
2606 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2607 amdgpu_fence_driver_force_completion(adev);
2608
2609 /* request to take full control of GPU before re-initialization */
2610 if (voluntary)
2611 amdgpu_virt_reset_gpu(adev);
2612 else
2613 amdgpu_virt_request_full_gpu(adev, true);
2614
2615
2616 /* Resume IP prior to SMC */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002617 amdgpu_sriov_reinit_early(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002618
 2619 /* we need to recover the GART before resuming SMC/CP/SDMA */
2620 amdgpu_ttm_recover_gart(adev);
2621
2622 /* now we are okay to resume SMC/CP/SDMA */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002623 amdgpu_sriov_reinit_late(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002624
2625 amdgpu_irq_gpu_reset_resume_helper(adev);
2626
 2627 r = amdgpu_ib_ring_tests(adev);
 2628 if (r)
 	dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2629
2630 /* release full control of GPU after ib test */
2631 amdgpu_virt_release_full_gpu(adev, true);
2632
2633 DRM_INFO("recover vram bo from shadow\n");
2634
2635 ring = adev->mman.buffer_funcs_ring;
2636 mutex_lock(&adev->shadow_list_lock);
2637 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002638 next = NULL;
Monk Liua90ad3c2017-01-23 14:22:08 +08002639 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2640 if (fence) {
2641 r = dma_fence_wait(fence, false);
2642 if (r) {
2643 WARN(r, "recovery from shadow isn't completed\n");
2644 break;
2645 }
2646 }
2647
2648 dma_fence_put(fence);
2649 fence = next;
2650 }
2651 mutex_unlock(&adev->shadow_list_lock);
2652
2653 if (fence) {
2654 r = dma_fence_wait(fence, false);
2655 if (r)
2656 WARN(r, "recovery from shadow isn't completed\n");
2657 }
2658 dma_fence_put(fence);
2659
2660 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2661 struct amdgpu_ring *ring = adev->rings[i];
2662 if (!ring || !ring->sched.thread)
2663 continue;
2664
2665 amd_sched_job_recovery(&ring->sched);
2666 kthread_unpark(ring->sched.thread);
2667 }
2668
2669 drm_helper_resume_force_mode(adev->ddev);
2670 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2671 if (r) {
 2672 /* bad news, how do we tell userspace? */
2673 dev_info(adev->dev, "GPU reset failed\n");
2674 }
2675
Monk Liu1fb37a32017-01-26 15:36:37 +08002676 adev->gfx.in_reset = false;
Monk Liu147b5982017-01-25 15:48:01 +08002677 mutex_unlock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002678 return r;
2679}
2680
2681/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002682 * amdgpu_gpu_reset - reset the asic
2683 *
2684 * @adev: amdgpu device pointer
2685 *
 2686 * Attempt to reset the GPU if it has hung (all asics).
2687 * Returns 0 for success or an error on failure.
2688 */
2689int amdgpu_gpu_reset(struct amdgpu_device *adev)
2690{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002691 int i, r;
2692 int resched;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002693 bool need_full_reset;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002694
Xiangliang Yufb140b22016-12-17 22:48:57 +08002695 if (amdgpu_sriov_vf(adev))
Monk Liua90ad3c2017-01-23 14:22:08 +08002696 return amdgpu_sriov_gpu_reset(adev, true);
Xiangliang Yufb140b22016-12-17 22:48:57 +08002697
Chunming Zhou63fbf422016-07-15 11:19:20 +08002698 if (!amdgpu_check_soft_reset(adev)) {
2699 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2700 return 0;
2701 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002702
Marek Olšákd94aed52015-05-05 21:13:49 +02002703 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002704
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002705 /* block TTM */
2706 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2707
Chunming Zhou0875dc92016-06-12 15:41:58 +08002708 /* block scheduler */
2709 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2710 struct amdgpu_ring *ring = adev->rings[i];
2711
Chunming Zhou51687752017-04-24 17:09:15 +08002712 if (!ring || !ring->sched.thread)
Chunming Zhou0875dc92016-06-12 15:41:58 +08002713 continue;
2714 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002715 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002716 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002717 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2718 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002719
Chunming Zhou35d782f2016-07-15 15:57:13 +08002720 need_full_reset = amdgpu_need_full_reset(adev);
2721
2722 if (!need_full_reset) {
2723 amdgpu_pre_soft_reset(adev);
2724 r = amdgpu_soft_reset(adev);
2725 amdgpu_post_soft_reset(adev);
2726 if (r || amdgpu_check_soft_reset(adev)) {
2727 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2728 need_full_reset = true;
2729 }
2730 }
2731
2732 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002733 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002734
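/*
 * If the post-resume IB ring tests fail further down, the device is
 * suspended again and we jump back to this label for another full reset
 * attempt.
 */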
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002735retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08002736 /* Disable fb access */
2737 if (adev->mode_info.num_crtc) {
2738 struct amdgpu_mode_mc_save save;
2739 amdgpu_display_stop_mc_access(adev, &save);
2740 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2741 }
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002742 if (adev->is_atom_fw)
2743 amdgpu_atomfirmware_scratch_regs_save(adev);
2744 else
2745 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002746 r = amdgpu_asic_reset(adev);
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002747 if (adev->is_atom_fw)
2748 amdgpu_atomfirmware_scratch_regs_restore(adev);
2749 else
2750 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002751 /* post card */
2752 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002753
Chunming Zhou35d782f2016-07-15 15:57:13 +08002754 if (!r) {
2755 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2756 r = amdgpu_resume(adev);
2757 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002758 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002759 if (!r) {
Chunming Zhoue72cfd52016-07-27 13:15:20 +08002760 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002761 if (need_full_reset && amdgpu_need_backup(adev)) {
2762 r = amdgpu_ttm_recover_gart(adev);
2763 if (r)
2764 DRM_ERROR("gart recovery failed!!!\n");
2765 }
Chunming Zhou1f465082016-06-30 15:02:26 +08002766 r = amdgpu_ib_ring_tests(adev);
2767 if (r) {
2768 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002769 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002770 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002771 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002772 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002773 /*
 2774 * recover vm page tables, since we cannot rely on VRAM being
 2775 * consistent after a gpu full reset.
 2776 */
2777 if (need_full_reset && amdgpu_need_backup(adev)) {
2778 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2779 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002780 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002781
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002782 DRM_INFO("recover vram bo from shadow\n");
2783 mutex_lock(&adev->shadow_list_lock);
2784 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002785 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002786 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2787 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002788 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002789 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08002790 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002791 break;
2792 }
2793 }
2794
Chris Wilsonf54d1862016-10-25 13:00:45 +01002795 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002796 fence = next;
2797 }
2798 mutex_unlock(&adev->shadow_list_lock);
2799 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002800 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002801 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08002802 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002803 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002804 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002805 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002806 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2807 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08002808
2809 if (!ring || !ring->sched.thread)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002810 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002811
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002812 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002813 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002814 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002815 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002816 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002817 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou51687752017-04-24 17:09:15 +08002818 if (adev->rings[i] && adev->rings[i]->sched.thread) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002819 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002820 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002821 }
2822 }
2823
2824 drm_helper_resume_force_mode(adev->ddev);
2825
2826 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2827 if (r) {
 2828 /* bad news, how do we tell userspace? */
2829 dev_info(adev->dev, "GPU reset failed\n");
2830 }
2831
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002832 return r;
2833}
2834
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002835void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2836{
2837 u32 mask;
2838 int ret;
2839
Alex Deuchercd474ba2016-02-04 10:21:23 -05002840 if (amdgpu_pcie_gen_cap)
2841 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2842
2843 if (amdgpu_pcie_lane_cap)
2844 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2845
2846 /* covers APUs as well */
2847 if (pci_is_root_bus(adev->pdev->bus)) {
2848 if (adev->pm.pcie_gen_mask == 0)
2849 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2850 if (adev->pm.pcie_mlw_mask == 0)
2851 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002852 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002853 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05002854
2855 if (adev->pm.pcie_gen_mask == 0) {
2856 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2857 if (!ret) {
2858 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2859 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2860 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2861
2862 if (mask & DRM_PCIE_SPEED_25)
2863 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2864 if (mask & DRM_PCIE_SPEED_50)
2865 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2866 if (mask & DRM_PCIE_SPEED_80)
2867 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2868 } else {
2869 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2870 }
2871 }
2872 if (adev->pm.pcie_mlw_mask == 0) {
2873 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2874 if (!ret) {
2875 switch (mask) {
2876 case 32:
2877 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2878 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2879 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2880 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2881 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2882 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2883 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2884 break;
2885 case 16:
2886 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2887 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2888 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2889 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2890 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2891 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2892 break;
2893 case 12:
2894 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2895 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2896 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2897 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2898 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2899 break;
2900 case 8:
2901 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2902 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2903 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2904 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2905 break;
2906 case 4:
2907 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2908 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2909 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2910 break;
2911 case 2:
2912 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2913 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2914 break;
2915 case 1:
2916 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2917 break;
2918 default:
2919 break;
2920 }
2921 } else {
2922 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002923 }
2924 }
2925}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002926
2927/*
2928 * Debugfs
2929 */
2930int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04002931 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002932 unsigned nfiles)
2933{
2934 unsigned i;
2935
2936 for (i = 0; i < adev->debugfs_count; i++) {
2937 if (adev->debugfs[i].files == files) {
2938 /* Already registered */
2939 return 0;
2940 }
2941 }
2942
2943 i = adev->debugfs_count + 1;
2944 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2945 DRM_ERROR("Reached maximum number of debugfs components.\n");
2946		DRM_ERROR("Report this so we can increase "
2947			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2948 return -EINVAL;
2949 }
2950 adev->debugfs[adev->debugfs_count].files = files;
2951 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2952 adev->debugfs_count = i;
2953#if defined(CONFIG_DEBUG_FS)
2954 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002955 adev->ddev->primary->debugfs_root,
2956 adev->ddev->primary);
2957#endif
2958 return 0;
2959}
2960
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002961#if defined(CONFIG_DEBUG_FS)
2962
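/*
 * Layout of the 64-bit file offset accepted by the amdgpu_regs debugfs file
 * (shared by the read and write paths below):
 *   bits  0..21  MMIO register offset in bytes
 *   bit      23  take adev->pm.mutex around the access (PG-sensitive regs)
 *   bits 24..33  SE index        (0x3FF selects broadcast)
 *   bits 34..43  SH index        (0x3FF selects broadcast)
 *   bits 44..53  instance index  (0x3FF selects broadcast)
 *   bit      62  apply the SE/SH/instance bank selection above
 *
 * Illustrative userspace sketch (assumed usage, not part of this file):
 *   pread(fd, &val, 4, (1ULL << 62) | (0x3FFULL << 34) | (0x3FFULL << 44) | reg)
 * reads register byte offset "reg" on SE0 with SH and instance broadcast.
 */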
2963static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2964 size_t size, loff_t *pos)
2965{
Al Viro45063092016-12-04 18:24:56 -05002966 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002967 ssize_t result = 0;
2968 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04002969 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04002970 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002971
2972 if (size & 0x3 || *pos & 0x3)
2973 return -EINVAL;
2974
Tom St Denisbd122672016-07-28 09:39:22 -04002975 /* are we reading registers for which a PG lock is necessary? */
2976 pm_pg_lock = (*pos >> 23) & 1;
2977
Tom St Denis566281592016-06-27 11:55:07 -04002978 if (*pos & (1ULL << 62)) {
2979 se_bank = (*pos >> 24) & 0x3FF;
2980 sh_bank = (*pos >> 34) & 0x3FF;
2981 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04002982
2983 if (se_bank == 0x3FF)
2984 se_bank = 0xFFFFFFFF;
2985 if (sh_bank == 0x3FF)
2986 sh_bank = 0xFFFFFFFF;
2987 if (instance_bank == 0x3FF)
2988 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04002989 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04002990 } else {
2991 use_bank = 0;
2992 }
2993
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04002994 *pos &= (1UL << 22) - 1;
Tom St Denisbd122672016-07-28 09:39:22 -04002995
Tom St Denis566281592016-06-27 11:55:07 -04002996 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04002997 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2998 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04002999 return -EINVAL;
3000 mutex_lock(&adev->grbm_idx_mutex);
3001 amdgpu_gfx_select_se_sh(adev, se_bank,
3002 sh_bank, instance_bank);
3003 }
3004
Tom St Denisbd122672016-07-28 09:39:22 -04003005 if (pm_pg_lock)
3006 mutex_lock(&adev->pm.mutex);
3007
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003008 while (size) {
3009 uint32_t value;
3010
3011		if (*pos >= adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04003012 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003013
3014 value = RREG32(*pos >> 2);
3015 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04003016 if (r) {
3017 result = r;
3018 goto end;
3019 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003020
3021 result += 4;
3022 buf += 4;
3023 *pos += 4;
3024 size -= 4;
3025 }
3026
Tom St Denis566281592016-06-27 11:55:07 -04003027end:
3028 if (use_bank) {
3029 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3030 mutex_unlock(&adev->grbm_idx_mutex);
3031 }
3032
Tom St Denisbd122672016-07-28 09:39:22 -04003033 if (pm_pg_lock)
3034 mutex_unlock(&adev->pm.mutex);
3035
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003036 return result;
3037}
3038
3039static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3040 size_t size, loff_t *pos)
3041{
Al Viro45063092016-12-04 18:24:56 -05003042 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003043 ssize_t result = 0;
3044 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04003045 bool pm_pg_lock, use_bank;
3046 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003047
3048 if (size & 0x3 || *pos & 0x3)
3049 return -EINVAL;
3050
Tom St Denis394fdde2016-10-10 07:31:23 -04003051	/* are we writing registers for which a PG lock is necessary? */
3052 pm_pg_lock = (*pos >> 23) & 1;
3053
3054 if (*pos & (1ULL << 62)) {
3055 se_bank = (*pos >> 24) & 0x3FF;
3056 sh_bank = (*pos >> 34) & 0x3FF;
3057 instance_bank = (*pos >> 44) & 0x3FF;
3058
3059 if (se_bank == 0x3FF)
3060 se_bank = 0xFFFFFFFF;
3061 if (sh_bank == 0x3FF)
3062 sh_bank = 0xFFFFFFFF;
3063 if (instance_bank == 0x3FF)
3064 instance_bank = 0xFFFFFFFF;
3065 use_bank = 1;
3066 } else {
3067 use_bank = 0;
3068 }
3069
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003070 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04003071
3072 if (use_bank) {
3073 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3074 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3075 return -EINVAL;
3076 mutex_lock(&adev->grbm_idx_mutex);
3077 amdgpu_gfx_select_se_sh(adev, se_bank,
3078 sh_bank, instance_bank);
3079 }
3080
3081 if (pm_pg_lock)
3082 mutex_lock(&adev->pm.mutex);
3083
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003084 while (size) {
3085 uint32_t value;
3086
3087		if (*pos >= adev->rmmio_size)
3088			goto end;
3089
3090 r = get_user(value, (uint32_t *)buf);
3091		if (r) {
3092			result = r;
			goto end;
		}
3093
3094 WREG32(*pos >> 2, value);
3095
3096 result += 4;
3097 buf += 4;
3098 *pos += 4;
3099 size -= 4;
3100 }
3101
end:
Tom St Denis394fdde2016-10-10 07:31:23 -04003102	if (use_bank) {
3103 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3104 mutex_unlock(&adev->grbm_idx_mutex);
3105 }
3106
3107 if (pm_pg_lock)
3108 mutex_unlock(&adev->pm.mutex);
3109
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003110 return result;
3111}
3112
Tom St Denisadcec282016-04-15 13:08:44 -04003113static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3114 size_t size, loff_t *pos)
3115{
Al Viro45063092016-12-04 18:24:56 -05003116 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003117 ssize_t result = 0;
3118 int r;
3119
3120 if (size & 0x3 || *pos & 0x3)
3121 return -EINVAL;
3122
3123 while (size) {
3124 uint32_t value;
3125
3126 value = RREG32_PCIE(*pos >> 2);
3127 r = put_user(value, (uint32_t *)buf);
3128 if (r)
3129 return r;
3130
3131 result += 4;
3132 buf += 4;
3133 *pos += 4;
3134 size -= 4;
3135 }
3136
3137 return result;
3138}
3139
3140static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3141 size_t size, loff_t *pos)
3142{
Al Viro45063092016-12-04 18:24:56 -05003143 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003144 ssize_t result = 0;
3145 int r;
3146
3147 if (size & 0x3 || *pos & 0x3)
3148 return -EINVAL;
3149
3150 while (size) {
3151 uint32_t value;
3152
3153 r = get_user(value, (uint32_t *)buf);
3154 if (r)
3155 return r;
3156
3157 WREG32_PCIE(*pos >> 2, value);
3158
3159 result += 4;
3160 buf += 4;
3161 *pos += 4;
3162 size -= 4;
3163 }
3164
3165 return result;
3166}
3167
3168static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3169 size_t size, loff_t *pos)
3170{
Al Viro45063092016-12-04 18:24:56 -05003171 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003172 ssize_t result = 0;
3173 int r;
3174
3175 if (size & 0x3 || *pos & 0x3)
3176 return -EINVAL;
3177
3178 while (size) {
3179 uint32_t value;
3180
3181 value = RREG32_DIDT(*pos >> 2);
3182 r = put_user(value, (uint32_t *)buf);
3183 if (r)
3184 return r;
3185
3186 result += 4;
3187 buf += 4;
3188 *pos += 4;
3189 size -= 4;
3190 }
3191
3192 return result;
3193}
3194
3195static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3196 size_t size, loff_t *pos)
3197{
Al Viro45063092016-12-04 18:24:56 -05003198 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003199 ssize_t result = 0;
3200 int r;
3201
3202 if (size & 0x3 || *pos & 0x3)
3203 return -EINVAL;
3204
3205 while (size) {
3206 uint32_t value;
3207
3208 r = get_user(value, (uint32_t *)buf);
3209 if (r)
3210 return r;
3211
3212 WREG32_DIDT(*pos >> 2, value);
3213
3214 result += 4;
3215 buf += 4;
3216 *pos += 4;
3217 size -= 4;
3218 }
3219
3220 return result;
3221}
3222
3223static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3224 size_t size, loff_t *pos)
3225{
Al Viro45063092016-12-04 18:24:56 -05003226 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003227 ssize_t result = 0;
3228 int r;
3229
3230 if (size & 0x3 || *pos & 0x3)
3231 return -EINVAL;
3232
3233 while (size) {
3234 uint32_t value;
3235
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003236 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04003237 r = put_user(value, (uint32_t *)buf);
3238 if (r)
3239 return r;
3240
3241 result += 4;
3242 buf += 4;
3243 *pos += 4;
3244 size -= 4;
3245 }
3246
3247 return result;
3248}
3249
3250static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3251 size_t size, loff_t *pos)
3252{
Al Viro45063092016-12-04 18:24:56 -05003253 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003254 ssize_t result = 0;
3255 int r;
3256
3257 if (size & 0x3 || *pos & 0x3)
3258 return -EINVAL;
3259
3260 while (size) {
3261 uint32_t value;
3262
3263 r = get_user(value, (uint32_t *)buf);
3264 if (r)
3265 return r;
3266
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003267 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04003268
3269 result += 4;
3270 buf += 4;
3271 *pos += 4;
3272 size -= 4;
3273 }
3274
3275 return result;
3276}
3277
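/*
 * amdgpu_gca_config dumps a flat array of 32-bit words describing the GFX
 * configuration.  Word 0 is a format version (currently 3); the fields that
 * follow are appended in exactly the order they are written below, so a
 * reader should check the version before interpreting the later words.
 */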
Tom St Denis1e051412016-06-27 09:57:18 -04003278static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3279 size_t size, loff_t *pos)
3280{
Al Viro45063092016-12-04 18:24:56 -05003281 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04003282 ssize_t result = 0;
3283 int r;
3284 uint32_t *config, no_regs = 0;
3285
3286 if (size & 0x3 || *pos & 0x3)
3287 return -EINVAL;
3288
Markus Elfringecab7662016-09-18 17:00:52 +02003289 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04003290 if (!config)
3291 return -ENOMEM;
3292
3293 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05003294 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04003295 config[no_regs++] = adev->gfx.config.max_shader_engines;
3296 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3297 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3298 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3299 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3300 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3301 config[no_regs++] = adev->gfx.config.max_gprs;
3302 config[no_regs++] = adev->gfx.config.max_gs_threads;
3303 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3304 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3305 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3306 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3307 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3308 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3309 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3310 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3311 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3312 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3313 config[no_regs++] = adev->gfx.config.num_gpus;
3314 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3315 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3316 config[no_regs++] = adev->gfx.config.gb_addr_config;
3317 config[no_regs++] = adev->gfx.config.num_rbs;
3318
Tom St Denis89a8f302016-08-12 15:14:31 -04003319 /* rev==1 */
3320 config[no_regs++] = adev->rev_id;
3321 config[no_regs++] = adev->pg_flags;
3322 config[no_regs++] = adev->cg_flags;
3323
Tom St Denise9f11dc2016-08-17 12:00:51 -04003324 /* rev==2 */
3325 config[no_regs++] = adev->family;
3326 config[no_regs++] = adev->external_rev_id;
3327
Tom St Denis9a999352017-01-18 13:01:25 -05003328 /* rev==3 */
3329 config[no_regs++] = adev->pdev->device;
3330 config[no_regs++] = adev->pdev->revision;
3331 config[no_regs++] = adev->pdev->subsystem_device;
3332 config[no_regs++] = adev->pdev->subsystem_vendor;
3333
Tom St Denis1e051412016-06-27 09:57:18 -04003334 while (size && (*pos < no_regs * 4)) {
3335 uint32_t value;
3336
3337 value = config[*pos >> 2];
3338 r = put_user(value, (uint32_t *)buf);
3339 if (r) {
3340 kfree(config);
3341 return r;
3342 }
3343
3344 result += 4;
3345 buf += 4;
3346 *pos += 4;
3347 size -= 4;
3348 }
3349
3350 kfree(config);
3351 return result;
3352}
3353
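/*
 * amdgpu_sensors: the file offset divided by four selects the powerplay
 * sensor to query; the returned data is whatever read_sensor() reports for
 * that sensor, up to 16 dwords.
 */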
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003354static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3355 size_t size, loff_t *pos)
3356{
Al Viro45063092016-12-04 18:24:56 -05003357 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003358 int idx, x, outsize, r, valuesize;
3359 uint32_t values[16];
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003360
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003361 if (size & 3 || *pos & 0x3)
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003362 return -EINVAL;
3363
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003364 if (amdgpu_dpm == 0)
3365 return -EINVAL;
3366
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003367 /* convert offset to sensor number */
3368 idx = *pos >> 2;
3369
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003370 valuesize = sizeof(values);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003371 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003372 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003373 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3374 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3375 &valuesize);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003376 else
3377 return -EINVAL;
3378
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003379 if (size > valuesize)
3380 return -EINVAL;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003381
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003382 outsize = 0;
3383 x = 0;
3384 if (!r) {
3385 while (size) {
3386			r = put_user(values[x++], (int32_t *)buf);
			if (r)
				break;
3387			buf += 4;
3388 size -= 4;
3389 outsize += 4;
3390 }
3391 }
3392
3393 return !r ? outsize : r;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003394}
Tom St Denis1e051412016-06-27 09:57:18 -04003395
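/*
 * amdgpu_wave: the file offset packs the wave selector; the low 7 bits are a
 * byte offset into the returned dwords and the SE/SH/CU/wave/SIMD fields sit
 * at the shifts decoded below.  The data itself comes from the per-ASIC
 * read_wave_data() callback (wave status registers) and is consumed by
 * userspace tools such as umr.
 */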
Tom St Denis273d7aa2016-10-11 14:48:55 -04003396static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3397 size_t size, loff_t *pos)
3398{
3399	struct amdgpu_device *adev = file_inode(f)->i_private;
3400	int r, x;
3401	ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003402 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003403
3404 if (size & 3 || *pos & 3)
3405 return -EINVAL;
3406
3407 /* decode offset */
3408 offset = (*pos & 0x7F);
3409 se = ((*pos >> 7) & 0xFF);
3410 sh = ((*pos >> 15) & 0xFF);
3411 cu = ((*pos >> 23) & 0xFF);
3412 wave = ((*pos >> 31) & 0xFF);
3413 simd = ((*pos >> 37) & 0xFF);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003414
3415 /* switch to the specific se/sh/cu */
3416 mutex_lock(&adev->grbm_idx_mutex);
3417 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3418
3419 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003420 if (adev->gfx.funcs->read_wave_data)
3421 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003422
3423 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3424 mutex_unlock(&adev->grbm_idx_mutex);
3425
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003426 if (!x)
3427 return -EINVAL;
3428
Tom St Denis472259f2016-10-14 09:49:09 -04003429 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003430 uint32_t value;
3431
Tom St Denis472259f2016-10-14 09:49:09 -04003432 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003433 r = put_user(value, (uint32_t *)buf);
3434 if (r)
3435 return r;
3436
3437 result += 4;
3438 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003439 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003440 size -= 4;
3441 }
3442
3443 return result;
3444}
3445
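/*
 * amdgpu_gpr: reads the selected wave's general purpose registers through
 * the read_wave_vgprs()/read_wave_sgprs() callbacks.  The file offset packs
 * a dword start offset plus the SE/SH/CU/wave/SIMD/thread selection and a
 * bank bit (0 = VGPRs, 1 = SGPRs) at the shifts decoded below.
 */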
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003446static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3447 size_t size, loff_t *pos)
3448{
3449	struct amdgpu_device *adev = file_inode(f)->i_private;
3450 int r;
3451 ssize_t result = 0;
3452 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3453
3454	if (size > 4096 || size & 3 || *pos & 3)
3455 return -EINVAL;
3456
3457 /* decode offset */
3458 offset = (*pos & 0xFFF); /* in dwords */
3459 se = ((*pos >> 12) & 0xFF);
3460 sh = ((*pos >> 20) & 0xFF);
3461 cu = ((*pos >> 28) & 0xFF);
3462 wave = ((*pos >> 36) & 0xFF);
3463 simd = ((*pos >> 44) & 0xFF);
3464 thread = ((*pos >> 52) & 0xFF);
3465 bank = ((*pos >> 60) & 1);
3466
3467 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3468 if (!data)
3469 return -ENOMEM;
3470
3471 /* switch to the specific se/sh/cu */
3472 mutex_lock(&adev->grbm_idx_mutex);
3473 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3474
3475 if (bank == 0) {
3476 if (adev->gfx.funcs->read_wave_vgprs)
3477 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3478 } else {
3479 if (adev->gfx.funcs->read_wave_sgprs)
3480 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3481 }
3482
3483 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3484 mutex_unlock(&adev->grbm_idx_mutex);
3485
3486 while (size) {
3487 uint32_t value;
3488
3489		value = data[result >> 2];
3490 r = put_user(value, (uint32_t *)buf);
3491 if (r) {
3492 result = r;
3493 goto err;
3494 }
3495
3496 result += 4;
3497 buf += 4;
3498 size -= 4;
3499 }
3500
3501err:
3502 kfree(data);
3503 return result;
3504}
3505
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003506static const struct file_operations amdgpu_debugfs_regs_fops = {
3507 .owner = THIS_MODULE,
3508 .read = amdgpu_debugfs_regs_read,
3509 .write = amdgpu_debugfs_regs_write,
3510 .llseek = default_llseek
3511};
Tom St Denisadcec282016-04-15 13:08:44 -04003512static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3513 .owner = THIS_MODULE,
3514 .read = amdgpu_debugfs_regs_didt_read,
3515 .write = amdgpu_debugfs_regs_didt_write,
3516 .llseek = default_llseek
3517};
3518static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3519 .owner = THIS_MODULE,
3520 .read = amdgpu_debugfs_regs_pcie_read,
3521 .write = amdgpu_debugfs_regs_pcie_write,
3522 .llseek = default_llseek
3523};
3524static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3525 .owner = THIS_MODULE,
3526 .read = amdgpu_debugfs_regs_smc_read,
3527 .write = amdgpu_debugfs_regs_smc_write,
3528 .llseek = default_llseek
3529};
3530
Tom St Denis1e051412016-06-27 09:57:18 -04003531static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3532 .owner = THIS_MODULE,
3533 .read = amdgpu_debugfs_gca_config_read,
3534 .llseek = default_llseek
3535};
3536
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003537static const struct file_operations amdgpu_debugfs_sensors_fops = {
3538 .owner = THIS_MODULE,
3539 .read = amdgpu_debugfs_sensor_read,
3540 .llseek = default_llseek
3541};
3542
Tom St Denis273d7aa2016-10-11 14:48:55 -04003543static const struct file_operations amdgpu_debugfs_wave_fops = {
3544 .owner = THIS_MODULE,
3545 .read = amdgpu_debugfs_wave_read,
3546 .llseek = default_llseek
3547};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003548static const struct file_operations amdgpu_debugfs_gpr_fops = {
3549 .owner = THIS_MODULE,
3550 .read = amdgpu_debugfs_gpr_read,
3551 .llseek = default_llseek
3552};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003553
Tom St Denisadcec282016-04-15 13:08:44 -04003554static const struct file_operations *debugfs_regs[] = {
3555 &amdgpu_debugfs_regs_fops,
3556 &amdgpu_debugfs_regs_didt_fops,
3557 &amdgpu_debugfs_regs_pcie_fops,
3558 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003559 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003560 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003561 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003562 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003563};
3564
3565static const char *debugfs_regs_names[] = {
3566 "amdgpu_regs",
3567 "amdgpu_regs_didt",
3568 "amdgpu_regs_pcie",
3569 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003570 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003571 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003572 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003573 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003574};
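/*
 * These files are created under the DRM minor's debugfs directory, e.g.
 * /sys/kernel/debug/dri/<minor>/amdgpu_regs on a typical system.
 */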
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003575
3576static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3577{
3578 struct drm_minor *minor = adev->ddev->primary;
3579 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003580 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003581
Tom St Denisadcec282016-04-15 13:08:44 -04003582 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3583 ent = debugfs_create_file(debugfs_regs_names[i],
3584 S_IFREG | S_IRUGO, root,
3585 adev, debugfs_regs[i]);
3586 if (IS_ERR(ent)) {
3587 for (j = 0; j < i; j++) {
3588				debugfs_remove(adev->debugfs_regs[j]);
3589				adev->debugfs_regs[j] = NULL;
3590 }
3591 return PTR_ERR(ent);
3592 }
3593
3594 if (!i)
3595 i_size_write(ent->d_inode, adev->rmmio_size);
3596 adev->debugfs_regs[i] = ent;
3597 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003598
3599 return 0;
3600}
3601
3602static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3603{
Tom St Denisadcec282016-04-15 13:08:44 -04003604 unsigned i;
3605
3606 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3607 if (adev->debugfs_regs[i]) {
3608 debugfs_remove(adev->debugfs_regs[i]);
3609 adev->debugfs_regs[i] = NULL;
3610 }
3611 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003612}
3613
3614int amdgpu_debugfs_init(struct drm_minor *minor)
3615{
3616 return 0;
3617}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003618#else
3619static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3620{
3621 return 0;
3622}
3623static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003624#endif