/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"LAST",
};

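/**
 * amdgpu_device_is_px - check whether the device is a PX/HG dGPU
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device has the AMD_IS_PX flag set (hybrid/PowerXpress
 * power control), false otherwise.
 */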
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
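/**
 * amdgpu_mm_rreg - read an MMIO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword-aligned register offset
 * @acc_flags: access flags (AMDGPU_REGS_NO_KIQ, AMDGPU_REGS_IDX)
 *
 * Reads the register directly if it falls inside the mapped MMIO range,
 * otherwise goes through the MM_INDEX/MM_DATA window. Under SR-IOV runtime
 * the access is routed through the KIQ.
 * Returns the register value.
 */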
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

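/**
 * amdgpu_mm_wreg - write an MMIO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword-aligned register offset
 * @v: value to write
 * @acc_flags: access flags (AMDGPU_REGS_NO_KIQ, AMDGPU_REGS_IDX)
 *
 * Writes @v to the register, either directly through the MMIO mapping,
 * through the MM_INDEX/MM_DATA window, or via the KIQ under SR-IOV runtime.
 */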
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

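/*
 * PCI I/O BAR register accessors. Registers beyond the mapped I/O region
 * are accessed indirectly through the MM_INDEX/MM_DATA pair.
 */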
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

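/**
 * amdgpu_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates, pins and CPU-maps a single GPU-page buffer object in VRAM
 * for use as a scratch area by the driver.
 * Returns 0 on success, a negative error code on failure.
 */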
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

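/**
 * amdgpu_vram_scratch_fini - free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Unmaps, unpins and frees the VRAM scratch buffer object, if it exists.
 */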
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

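/**
 * amdgpu_pci_config_reset - trigger an asic reset via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Writes the reset magic value to the asic's PCI config reset register
 * (offset 0x7c) to reset the whole asic.
 */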
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_get_64bit - Allocate a 64 bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);
	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a 64 bit wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Tries to place VRAM at the base address provided as a parameter
 * (which is so far either the PCI aperture address or, for IGPs,
 * the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space, the VRAM size is limited to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the mc
 * to cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 * ones).
 *
 * Note: the IGP TOM addr should be the same as the aperture addr, but we
 * don't explicitly check for that.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Tries to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, the GTT size is adjusted.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Checks whether the asic still needs to be posted: either it has not been
 * initialized yet at driver startup, or a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;

}

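/**
 * amdgpu_vpost_needed - check whether the driver should post via the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Decides whether a vPost is required: SR-IOV VFs never post, passthrough
 * Fiji boards with old SMC firmware are forced to post, and otherwise the
 * decision falls back to amdgpu_need_post().
 */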
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In the whole-GPU pass-through virtualization case, after a
		 * VM reboot some old smc fw still needs the driver to do a vPost,
		 * otherwise the gpu hangs. smc fw versions above 22.15 don't have this
		 * flaw, so we force a vPost for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

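/**
 * amdgpu_check_vm_size - validate the amdgpu_vm_size module parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Checks that the requested VM address space size is a power of two within
 * the supported range and falls back to the default (-1, auto) otherwise.
 */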
static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

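/**
 * amdgpu_set_clockgating_state - set clockgating for an IP block type
 *
 * @adev: amdgpu_device pointer
 * @block_type: the IP block type to operate on
 * @state: clockgating state to set
 *
 * Walks the IP blocks and calls set_clockgating_state on every valid block
 * of the requested type. Returns 0 on success or the callback's error code.
 */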
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
					     enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

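/**
 * amdgpu_device_enable_virtual_display - parse the virtual_display option
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the amdgpu_virtual_display module parameter ("all" or a list of
 * PCI addresses, optionally with a crtc count) and enables virtual display
 * emulation for this device when it matches.
 */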
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

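/**
 * amdgpu_device_parse_gpu_info_fw - load and apply the gpu_info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * For asics that ship a gpu_info firmware (currently VEGA10), loads and
 * validates it and copies the gfx configuration values it contains into
 * adev->gfx.config. Returns 0 on success or a negative error code.
 */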
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const struct firmware *fw;
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = gpu_info_fw->gc_num_se;
		adev->gfx.config.max_cu_per_sh = gpu_info_fw->gc_num_cu_per_sh;
		adev->gfx.config.max_sh_per_se = gpu_info_fw->gc_num_sh_per_se;
		adev->gfx.config.max_backends_per_se = gpu_info_fw->gc_num_rb_per_se;
		adev->gfx.config.max_texture_channel_caches =
			gpu_info_fw->gc_num_tccs;
		adev->gfx.config.max_gprs = gpu_info_fw->gc_num_gprs;
		adev->gfx.config.max_gs_threads = gpu_info_fw->gc_num_max_gs_thds;
		adev->gfx.config.gs_vgt_table_depth = gpu_info_fw->gc_gs_table_depth;
		adev->gfx.config.gs_prim_buffer_depth = gpu_info_fw->gc_gsprim_buff_depth;
		adev->gfx.config.double_offchip_lds_buf =
			gpu_info_fw->gc_double_offchip_lds_buffer;
		adev->gfx.cu_info.wave_front_size = gpu_info_fw->gc_wave_size;
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	release_firmware(fw);
	fw = NULL;

	return err;
}

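/**
 * amdgpu_early_init - run the early_init callbacks of all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Determines the asic family, registers the matching IP blocks, applies
 * the ip_block_mask module parameter and calls each block's early_init
 * callback. Returns 0 on success or a negative error code.
 */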
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
		adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

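/**
 * amdgpu_init - run the sw_init and hw_init callbacks of all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls sw_init for every valid IP block, brings up the GMC block early so
 * GPU memory (VRAM scratch, writeback, SR-IOV CSA) can be allocated, then
 * runs hw_init for the remaining blocks.
 * Returns 0 on success or a negative error code.
 */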
1590static int amdgpu_init(struct amdgpu_device *adev)
1591{
1592 int i, r;
1593
1594 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001595 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001596 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001597 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001598 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001599 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1600 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001601 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001602 }
Alex Deuchera1255102016-10-13 17:41:13 -04001603 adev->ip_blocks[i].status.sw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001604 /* need to do gmc hw init early so we can allocate gpu mem */
Alex Deuchera1255102016-10-13 17:41:13 -04001605 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001606 r = amdgpu_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001607 if (r) {
1608 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001609 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001610 }
Alex Deuchera1255102016-10-13 17:41:13 -04001611 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001612 if (r) {
1613 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001614 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001615 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001616 r = amdgpu_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001617 if (r) {
1618 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001619 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001620 }
Alex Deuchera1255102016-10-13 17:41:13 -04001621 adev->ip_blocks[i].status.hw = true;
Monk Liu24936642017-01-09 15:54:32 +08001622
1623 /* right after GMC hw init, we create CSA */
1624 if (amdgpu_sriov_vf(adev)) {
1625 r = amdgpu_allocate_static_csa(adev);
1626 if (r) {
1627 DRM_ERROR("allocate CSA failed %d\n", r);
1628 return r;
1629 }
1630 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001631 }
1632 }
1633
1634 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001635 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001636 continue;
1637 /* gmc hw init is done early */
Alex Deuchera1255102016-10-13 17:41:13 -04001638 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001639 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001640 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001641 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001642 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001644 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001645 }
Alex Deuchera1255102016-10-13 17:41:13 -04001646 adev->ip_blocks[i].status.hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001647 }
1648
1649 return 0;
1650}
1651
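/*
 * amdgpu_late_init - run late_init and enable clockgating.
 *
 * Calls late_init on every valid IP block, then enables clockgating on
 * all blocks except UVD and VCE, whose gating is handled separately.
 */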
1652static int amdgpu_late_init(struct amdgpu_device *adev)
1653{
1654 int i = 0, r;
1655
1656 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001657 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001658 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001659 if (adev->ip_blocks[i].version->funcs->late_init) {
1660 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001661 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001662 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1663 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001664 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001665 }
Alex Deuchera1255102016-10-13 17:41:13 -04001666 adev->ip_blocks[i].status.late_initialized = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001667 }
Alex Deucher4a446d52016-10-07 14:48:18 -04001668 /* skip CG for VCE/UVD, it's handled specially */
Alex Deuchera1255102016-10-13 17:41:13 -04001669 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1670 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
Alex Deucher4a446d52016-10-07 14:48:18 -04001671 /* enable clockgating to save power */
Alex Deuchera1255102016-10-13 17:41:13 -04001672 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1673 AMD_CG_STATE_GATE);
Alex Deucher4a446d52016-10-07 14:48:18 -04001674 if (r) {
1675 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001676 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher4a446d52016-10-07 14:48:18 -04001677 return r;
1678 }
Arindam Nathb0b00ff2016-10-07 19:01:37 +05301679 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001680 }
1681
1682 return 0;
1683}
1684
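/*
 * amdgpu_fini - tear down all IP blocks.
 *
 * The SMC block is ungated and shut down first; the remaining blocks are
 * then walked in reverse order for hw_fini, sw_fini and late_fini.  Under
 * SRIOV the CSA is freed and full GPU access is released at the end.
 */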
1685static int amdgpu_fini(struct amdgpu_device *adev)
1686{
1687 int i, r;
1688
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001689 /* need to disable SMC first */
1690 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001691 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001692 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001693 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001694 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001695 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1696 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001697 if (r) {
1698 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001699 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001700 return r;
1701 }
Alex Deuchera1255102016-10-13 17:41:13 -04001702 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001703 /* XXX handle errors */
1704 if (r) {
1705 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001706 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001707 }
Alex Deuchera1255102016-10-13 17:41:13 -04001708 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001709 break;
1710 }
1711 }
1712
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001713 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001714 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001715 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001716 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001717 amdgpu_wb_fini(adev);
1718 amdgpu_vram_scratch_fini(adev);
1719 }
Rex Zhu8201a672016-11-24 21:44:44 +08001720
1721 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1722 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1723 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1724 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1725 AMD_CG_STATE_UNGATE);
1726 if (r) {
1727 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1728 adev->ip_blocks[i].version->funcs->name, r);
1729 return r;
1730 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001731 }
Rex Zhu8201a672016-11-24 21:44:44 +08001732
Alex Deuchera1255102016-10-13 17:41:13 -04001733 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001734 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001735 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001736 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1737 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001738 }
Rex Zhu8201a672016-11-24 21:44:44 +08001739
Alex Deuchera1255102016-10-13 17:41:13 -04001740 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001741 }
1742
1743 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001744 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001745 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001746 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001747 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001748 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001749 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1750 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001751 }
Alex Deuchera1255102016-10-13 17:41:13 -04001752 adev->ip_blocks[i].status.sw = false;
1753 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001754 }
1755
Monk Liua6dcfd92016-05-19 14:36:34 +08001756 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001757 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001758 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001759 if (adev->ip_blocks[i].version->funcs->late_fini)
1760 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1761 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001762 }
1763
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001764 if (amdgpu_sriov_vf(adev)) {
Monk Liu24936642017-01-09 15:54:32 +08001765 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001766 amdgpu_virt_release_full_gpu(adev, false);
1767 }
Monk Liu24936642017-01-09 15:54:32 +08001768
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001769 return 0;
1770}
1771
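/*
 * amdgpu_suspend - suspend all IP blocks.
 *
 * Ungates the SMC block first, then walks the IP blocks in reverse order,
 * ungating and suspending each valid block.  Under SRIOV, full GPU access
 * is requested before and released after the walk.
 */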
Alex Deucherfaefba92016-12-06 10:38:29 -05001772int amdgpu_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001773{
1774 int i, r;
1775
Xiangliang Yue941ea92017-01-18 12:47:55 +08001776 if (amdgpu_sriov_vf(adev))
1777 amdgpu_virt_request_full_gpu(adev, false);
1778
Flora Cuic5a93a22016-02-26 10:45:25 +08001779 /* ungate SMC block first */
1780 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1781 AMD_CG_STATE_UNGATE);
1782 if (r) {
1783 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1784 }
1785
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001786 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001787 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001788 continue;
1789 /* ungate blocks so that suspend can properly shut them down */
Flora Cuic5a93a22016-02-26 10:45:25 +08001790		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
Alex Deuchera1255102016-10-13 17:41:13 -04001791 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1792 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001793 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001794 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1795 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001796 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001797 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001798 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001799 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001800 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001801 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001802 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1803 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001804 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001805 }
1806
Xiangliang Yue941ea92017-01-18 12:47:55 +08001807 if (amdgpu_sriov_vf(adev))
1808 amdgpu_virt_release_full_gpu(adev, false);
1809
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001810 return 0;
1811}
1812
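/*
 * First stage of SRIOV re-init: re-run hw_init for the GMC, COMMON,
 * GFXHUB, MMHUB and IH blocks, in that fixed order.
 */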
Monk Liue4f0fdc2017-02-09 11:55:49 +08001813static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001814{
1815 int i, r;
1816
Monk Liu2cb681b2017-04-26 12:00:49 +08001817 static enum amd_ip_block_type ip_order[] = {
1818 AMD_IP_BLOCK_TYPE_GMC,
1819 AMD_IP_BLOCK_TYPE_COMMON,
1820 AMD_IP_BLOCK_TYPE_GFXHUB,
1821 AMD_IP_BLOCK_TYPE_MMHUB,
1822 AMD_IP_BLOCK_TYPE_IH,
1823 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001824
Monk Liu2cb681b2017-04-26 12:00:49 +08001825 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1826 int j;
1827 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001828
Monk Liu2cb681b2017-04-26 12:00:49 +08001829 for (j = 0; j < adev->num_ip_blocks; j++) {
1830 block = &adev->ip_blocks[j];
1831
1832 if (block->version->type != ip_order[i] ||
1833 !block->status.valid)
1834 continue;
1835
1836 r = block->version->funcs->hw_init(adev);
1837 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
Monk Liua90ad3c2017-01-23 14:22:08 +08001838 }
1839 }
1840
1841 return 0;
1842}
1843
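/*
 * Second stage of SRIOV re-init (after the GART has been recovered):
 * re-run hw_init for the SMC, DCE, GFX, SDMA and VCE blocks, in that
 * fixed order.
 */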
Monk Liue4f0fdc2017-02-09 11:55:49 +08001844static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001845{
1846 int i, r;
1847
Monk Liu2cb681b2017-04-26 12:00:49 +08001848 static enum amd_ip_block_type ip_order[] = {
1849 AMD_IP_BLOCK_TYPE_SMC,
1850 AMD_IP_BLOCK_TYPE_DCE,
1851 AMD_IP_BLOCK_TYPE_GFX,
1852 AMD_IP_BLOCK_TYPE_SDMA,
1853 AMD_IP_BLOCK_TYPE_VCE,
1854 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001855
Monk Liu2cb681b2017-04-26 12:00:49 +08001856 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1857 int j;
1858 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001859
Monk Liu2cb681b2017-04-26 12:00:49 +08001860 for (j = 0; j < adev->num_ip_blocks; j++) {
1861 block = &adev->ip_blocks[j];
1862
1863 if (block->version->type != ip_order[i] ||
1864 !block->status.valid)
1865 continue;
1866
1867 r = block->version->funcs->hw_init(adev);
1868 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
Monk Liua90ad3c2017-01-23 14:22:08 +08001869 }
1870 }
1871
1872 return 0;
1873}
1874
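/*
 * Phase 1 of resume: bring up only the COMMON, GMC and IH blocks so that
 * memory and interrupts are available before everything else resumes.
 */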
Chunming Zhoufcf06492017-05-05 10:33:33 +08001875static int amdgpu_resume_phase1(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001876{
1877 int i, r;
1878
1879 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001880 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001881 continue;
Chunming Zhoufcf06492017-05-05 10:33:33 +08001882 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1883 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1884 adev->ip_blocks[i].version->type ==
1885 AMD_IP_BLOCK_TYPE_IH) {
1886 r = adev->ip_blocks[i].version->funcs->resume(adev);
1887 if (r) {
1888 DRM_ERROR("resume of IP block <%s> failed %d\n",
1889 adev->ip_blocks[i].version->funcs->name, r);
1890 return r;
1891 }
1892 }
1893 }
1894
1895 return 0;
1896}
1897
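/* Phase 2 of resume: bring up every remaining valid IP block. */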
1898static int amdgpu_resume_phase2(struct amdgpu_device *adev)
1899{
1900 int i, r;
1901
1902 for (i = 0; i < adev->num_ip_blocks; i++) {
1903 if (!adev->ip_blocks[i].status.valid)
1904 continue;
1905 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1906 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1907 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1908 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001909 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001910 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001911 DRM_ERROR("resume of IP block <%s> failed %d\n",
1912 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001913 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001914 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001915 }
1916
1917 return 0;
1918}
1919
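/* Full resume: phase 1 (COMMON/GMC/IH) followed by phase 2 (all other blocks). */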
Chunming Zhoufcf06492017-05-05 10:33:33 +08001920static int amdgpu_resume(struct amdgpu_device *adev)
1921{
1922 int r;
1923
1924 r = amdgpu_resume_phase1(adev);
1925 if (r)
1926 return r;
1927 r = amdgpu_resume_phase2(adev);
1928
1929 return r;
1930}
1931
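/*
 * Check the vBIOS (atomfirmware or legacy atombios) for virtualization
 * support and, if present, flag the device as having an SRIOV vBIOS.
 */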
Monk Liu4e99a442016-03-31 13:26:59 +08001932static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04001933{
Alex Deuchera5bde2f2016-09-23 16:23:41 -04001934 if (adev->is_atom_fw) {
1935 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1936 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1937 } else {
1938 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1939 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1940 }
Andres Rodriguez048765a2016-06-11 02:51:32 -04001941}
1942
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001943/**
1944 * amdgpu_device_init - initialize the driver
1945 *
1946 * @adev: amdgpu_device pointer
1947 * @ddev: drm dev pointer
1948 * @pdev: pci dev pointer
1949 * @flags: driver flags
1950 *
1951 * Initializes the driver info and hw (all asics).
1952 * Returns 0 for success or an error on failure.
1953 * Called at driver startup.
1954 */
1955int amdgpu_device_init(struct amdgpu_device *adev,
1956 struct drm_device *ddev,
1957 struct pci_dev *pdev,
1958 uint32_t flags)
1959{
1960 int r, i;
1961 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02001962 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001963
1964 adev->shutdown = false;
1965 adev->dev = &pdev->dev;
1966 adev->ddev = ddev;
1967 adev->pdev = pdev;
1968 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08001969 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001970 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1971 adev->mc.gtt_size = 512 * 1024 * 1024;
1972 adev->accel_working = false;
1973 adev->num_rings = 0;
1974 adev->mman.buffer_funcs = NULL;
1975 adev->mman.buffer_funcs_ring = NULL;
1976 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01001977 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001978 adev->gart.gart_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01001979 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001980
1981 adev->smc_rreg = &amdgpu_invalid_rreg;
1982 adev->smc_wreg = &amdgpu_invalid_wreg;
1983 adev->pcie_rreg = &amdgpu_invalid_rreg;
1984 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08001985 adev->pciep_rreg = &amdgpu_invalid_rreg;
1986 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001987 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1988 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1989 adev->didt_rreg = &amdgpu_invalid_rreg;
1990 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08001991 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1992 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001993 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1994 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1995
Rex Zhuccdbb202016-06-08 12:47:41 +08001996
Alex Deucher3e39ab92015-06-05 15:04:33 -04001997 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1998 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1999 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002000
2001	/* mutex initializations are all done here so we
2002	 * can call these functions again without locking issues */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002003 atomic_set(&adev->irq.ih.lock, 0);
Huang Rui0e5ca0d2017-03-03 18:37:23 -05002004 mutex_init(&adev->firmware.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002005 mutex_init(&adev->pm.mutex);
2006 mutex_init(&adev->gfx.gpu_clock_mutex);
2007 mutex_init(&adev->srbm_mutex);
2008 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002009 mutex_init(&adev->mn_lock);
2010 hash_init(adev->mn_hash);
2011
2012 amdgpu_check_arguments(adev);
2013
2014 /* Registers mapping */
2015 /* TODO: block userspace mapping of io register */
2016 spin_lock_init(&adev->mmio_idx_lock);
2017 spin_lock_init(&adev->smc_idx_lock);
2018 spin_lock_init(&adev->pcie_idx_lock);
2019 spin_lock_init(&adev->uvd_ctx_idx_lock);
2020 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08002021 spin_lock_init(&adev->gc_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002022 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02002023 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002024
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08002025 INIT_LIST_HEAD(&adev->shadow_list);
2026 mutex_init(&adev->shadow_list_lock);
2027
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002028 INIT_LIST_HEAD(&adev->gtt_list);
2029 spin_lock_init(&adev->gtt_list_lock);
2030
Ken Wangda69c1612016-01-21 19:08:55 +08002031 if (adev->asic_type >= CHIP_BONAIRE) {
2032 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2033 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2034 } else {
2035 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2036 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2037 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002038
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002039 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2040 if (adev->rmmio == NULL) {
2041 return -ENOMEM;
2042 }
2043 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2044 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2045
Ken Wangda69c1612016-01-21 19:08:55 +08002046 if (adev->asic_type >= CHIP_BONAIRE)
2047 /* doorbell bar mapping */
2048 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002049
2050 /* io port mapping */
2051 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2052 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2053 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2054 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2055 break;
2056 }
2057 }
2058 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05002059 DRM_INFO("PCI I/O BAR is not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002060
2061 /* early init functions */
2062 r = amdgpu_early_init(adev);
2063 if (r)
2064 return r;
2065
2066 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2067 /* this will fail for cards that aren't VGA class devices, just
2068 * ignore it */
2069 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2070
2071 if (amdgpu_runtime_pm == 1)
2072 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04002073 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002074 runtime = true;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002075 if (!pci_is_thunderbolt_attached(adev->pdev))
2076 vga_switcheroo_register_client(adev->pdev,
2077 &amdgpu_switcheroo_ops, runtime);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002078 if (runtime)
2079 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2080
2081 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04002082 if (!amdgpu_get_bios(adev)) {
2083 r = -EINVAL;
2084 goto failed;
2085 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01002086
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002087 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002088 if (r) {
2089 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002090 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002091 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002092
Monk Liu4e99a442016-03-31 13:26:59 +08002093	/* detect whether we are running with an SRIOV vBIOS */
2094 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04002095
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002096 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08002097 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002098 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08002099 dev_err(adev->dev, "no vBIOS found\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002100 r = -EINVAL;
2101 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002102 }
Monk Liubec86372016-09-14 19:38:08 +08002103 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08002104 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2105 if (r) {
2106 dev_err(adev->dev, "gpu post error!\n");
2107 goto failed;
2108 }
2109 } else {
2110 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002111 }
2112
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002113 if (!adev->is_atom_fw) {
2114 /* Initialize clocks */
2115 r = amdgpu_atombios_get_clock_info(adev);
2116 if (r) {
2117 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2118 return r;
2119 }
2120 /* init i2c buses */
2121 amdgpu_atombios_i2c_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002122 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002123
2124 /* Fence driver */
2125 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002126 if (r) {
2127 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002128 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002129 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002130
2131 /* init the mode config */
2132 drm_mode_config_init(adev->ddev);
2133
2134 r = amdgpu_init(adev);
2135 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05002136 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002137 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002138 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002139 }
2140
2141 adev->accel_working = true;
2142
Marek Olšák95844d22016-08-17 23:49:27 +02002143 /* Initialize the buffer migration limit. */
2144 if (amdgpu_moverate >= 0)
2145 max_MBps = amdgpu_moverate;
2146 else
2147 max_MBps = 8; /* Allow 8 MB/s. */
2148 /* Get a log2 for easy divisions. */
2149 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2150
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002151 r = amdgpu_ib_pool_init(adev);
2152 if (r) {
2153 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002154 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002155 }
2156
2157 r = amdgpu_ib_ring_tests(adev);
2158 if (r)
2159 DRM_ERROR("ib ring test failed (%d).\n", r);
2160
Monk Liu9bc92b92017-02-08 17:38:13 +08002161 amdgpu_fbdev_init(adev);
2162
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002163 r = amdgpu_gem_debugfs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002164 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002165 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002166
2167 r = amdgpu_debugfs_regs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002168 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002169 DRM_ERROR("registering register debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002170
Huang Rui50ab2532016-06-12 15:51:09 +08002171 r = amdgpu_debugfs_firmware_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002172 if (r)
Huang Rui50ab2532016-06-12 15:51:09 +08002173 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
Huang Rui50ab2532016-06-12 15:51:09 +08002174
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002175 if ((amdgpu_testing & 1)) {
2176 if (adev->accel_working)
2177 amdgpu_test_moves(adev);
2178 else
2179 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2180 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002181 if (amdgpu_benchmarking) {
2182 if (adev->accel_working)
2183 amdgpu_benchmark(adev, amdgpu_benchmarking);
2184 else
2185 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2186 }
2187
2188 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2189 * explicit gating rather than handling it automatically.
2190 */
2191 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002192 if (r) {
2193 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002194 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002195 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002196
2197 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04002198
2199failed:
2200 if (runtime)
2201 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2202 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002203}
2204
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002205/**
2206 * amdgpu_device_fini - tear down the driver
2207 *
2208 * @adev: amdgpu_device pointer
2209 *
2210 * Tear down the driver info (all asics).
2211 * Called at driver shutdown.
2212 */
2213void amdgpu_device_fini(struct amdgpu_device *adev)
2214{
2215 int r;
2216
2217 DRM_INFO("amdgpu: finishing device.\n");
2218 adev->shutdown = true;
Pixel Dingdb2c2a92017-04-25 16:47:42 +08002219 if (adev->mode_info.mode_config_initialized)
2220 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002221 /* evict vram memory */
2222 amdgpu_bo_evict_vram(adev);
2223 amdgpu_ib_pool_fini(adev);
2224 amdgpu_fence_driver_fini(adev);
2225 amdgpu_fbdev_fini(adev);
2226 r = amdgpu_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002227 adev->accel_working = false;
2228 /* free i2c buses */
2229 amdgpu_i2c_fini(adev);
2230 amdgpu_atombios_fini(adev);
2231 kfree(adev->bios);
2232 adev->bios = NULL;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002233 if (!pci_is_thunderbolt_attached(adev->pdev))
2234 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002235 if (adev->flags & AMD_IS_PX)
2236 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002237 vga_client_register(adev->pdev, NULL, NULL, NULL);
2238 if (adev->rio_mem)
2239 pci_iounmap(adev->pdev, adev->rio_mem);
2240 adev->rio_mem = NULL;
2241 iounmap(adev->rmmio);
2242 adev->rmmio = NULL;
Ken Wangda69c1612016-01-21 19:08:55 +08002243 if (adev->asic_type >= CHIP_BONAIRE)
2244 amdgpu_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002245 amdgpu_debugfs_regs_cleanup(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002246}
2247
2248
2249/*
2250 * Suspend & resume.
2251 */
2252/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002253 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002254 *
2255 * @dev: drm dev pointer
2256 * @suspend: true to also put the PCI device into a low power state
2257 *
2258 * Puts the hw in the suspend state (all asics).
2259 * Returns 0 for success or an error on failure.
2260 * Called at driver suspend.
2261 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002262int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002263{
2264 struct amdgpu_device *adev;
2265 struct drm_crtc *crtc;
2266 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002267 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002268
2269 if (dev == NULL || dev->dev_private == NULL) {
2270 return -ENODEV;
2271 }
2272
2273 adev = dev->dev_private;
2274
2275 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2276 return 0;
2277
2278 drm_kms_helper_poll_disable(dev);
2279
2280 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002281 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002282 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2283 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2284 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002285 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002286
Alex Deucher756e6882015-10-08 00:03:36 -04002287 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002288 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04002289 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002290 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2291 struct amdgpu_bo *robj;
2292
Alex Deucher756e6882015-10-08 00:03:36 -04002293 if (amdgpu_crtc->cursor_bo) {
2294 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002295 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002296 if (r == 0) {
2297 amdgpu_bo_unpin(aobj);
2298 amdgpu_bo_unreserve(aobj);
2299 }
2300 }
2301
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002302 if (rfb == NULL || rfb->obj == NULL) {
2303 continue;
2304 }
2305 robj = gem_to_amdgpu_bo(rfb->obj);
2306 /* don't unpin kernel fb objects */
2307 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
Alex Xie7a6901d2017-04-24 13:52:41 -04002308 r = amdgpu_bo_reserve(robj, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002309 if (r == 0) {
2310 amdgpu_bo_unpin(robj);
2311 amdgpu_bo_unreserve(robj);
2312 }
2313 }
2314 }
2315 /* evict vram memory */
2316 amdgpu_bo_evict_vram(adev);
2317
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002318 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002319
2320 r = amdgpu_suspend(adev);
2321
Alex Deuchera0a71e42016-10-10 12:41:36 -04002322 /* evict remaining vram memory
2323 * This second call to evict vram is to evict the gart page table
2324 * using the CPU.
2325 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002326 amdgpu_bo_evict_vram(adev);
2327
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002328 if (adev->is_atom_fw)
2329 amdgpu_atomfirmware_scratch_regs_save(adev);
2330 else
2331 amdgpu_atombios_scratch_regs_save(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002332 pci_save_state(dev->pdev);
2333 if (suspend) {
2334 /* Shut down the device */
2335 pci_disable_device(dev->pdev);
2336 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002337 } else {
2338 r = amdgpu_asic_reset(adev);
2339 if (r)
2340 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002341 }
2342
2343 if (fbcon) {
2344 console_lock();
2345 amdgpu_fbdev_set_suspend(adev, 1);
2346 console_unlock();
2347 }
2348 return 0;
2349}
2350
2351/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002352 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002353 *
2354 * @dev: drm dev pointer
2355 *
2356 * Bring the hw back to operating state (all asics).
2357 * Returns 0 for success or an error on failure.
2358 * Called at driver resume.
2359 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002360int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002361{
2362 struct drm_connector *connector;
2363 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002364 struct drm_crtc *crtc;
Huang Rui03161a62017-04-13 16:12:26 +08002365 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002366
2367 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2368 return 0;
2369
jimqu74b0b152016-09-07 17:09:12 +08002370 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002371 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002372
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002373 if (resume) {
2374 pci_set_power_state(dev->pdev, PCI_D0);
2375 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002376 r = pci_enable_device(dev->pdev);
Huang Rui03161a62017-04-13 16:12:26 +08002377 if (r)
2378 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002379 }
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002380 if (adev->is_atom_fw)
2381 amdgpu_atomfirmware_scratch_regs_restore(adev);
2382 else
2383 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002384
2385 /* post card */
Jim Quc836fec2017-02-10 15:59:59 +08002386 if (amdgpu_need_post(adev)) {
jimqu74b0b152016-09-07 17:09:12 +08002387 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2388 if (r)
2389 DRM_ERROR("amdgpu asic init failed\n");
2390 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002391
2392 r = amdgpu_resume(adev);
Rex Zhue6707212017-03-30 13:21:01 +08002393 if (r) {
Flora Cuica198522016-02-04 15:10:08 +08002394 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Huang Rui03161a62017-04-13 16:12:26 +08002395 goto unlock;
Rex Zhue6707212017-03-30 13:21:01 +08002396 }
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002397 amdgpu_fence_driver_resume(adev);
2398
Flora Cuica198522016-02-04 15:10:08 +08002399 if (resume) {
2400 r = amdgpu_ib_ring_tests(adev);
2401 if (r)
2402 DRM_ERROR("ib ring test failed (%d).\n", r);
2403 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002404
2405 r = amdgpu_late_init(adev);
Huang Rui03161a62017-04-13 16:12:26 +08002406 if (r)
2407 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002408
Alex Deucher756e6882015-10-08 00:03:36 -04002409 /* pin cursors */
2410 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2411 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2412
2413 if (amdgpu_crtc->cursor_bo) {
2414 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002415 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002416 if (r == 0) {
2417 r = amdgpu_bo_pin(aobj,
2418 AMDGPU_GEM_DOMAIN_VRAM,
2419 &amdgpu_crtc->cursor_addr);
2420 if (r != 0)
2421 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2422 amdgpu_bo_unreserve(aobj);
2423 }
2424 }
2425 }
2426
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002427 /* blat the mode back in */
2428 if (fbcon) {
2429 drm_helper_resume_force_mode(dev);
2430 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002431 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002432 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2433 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2434 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002435 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002436 }
2437
2438 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002439
2440 /*
2441 * Most of the connector probing functions try to acquire runtime pm
2442 * refs to ensure that the GPU is powered on when connector polling is
2443 * performed. Since we're calling this from a runtime PM callback,
2444 * trying to acquire rpm refs will cause us to deadlock.
2445 *
2446 * Since we're guaranteed to be holding the rpm lock, it's safe to
2447 * temporarily disable the rpm helpers so this doesn't deadlock us.
2448 */
2449#ifdef CONFIG_PM
2450 dev->dev->power.disable_depth++;
2451#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002452 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002453#ifdef CONFIG_PM
2454 dev->dev->power.disable_depth--;
2455#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002456
Huang Rui03161a62017-04-13 16:12:26 +08002457 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002458 amdgpu_fbdev_set_suspend(adev, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002459
Huang Rui03161a62017-04-13 16:12:26 +08002460unlock:
2461 if (fbcon)
2462 console_unlock();
2463
2464 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002465}
2466
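/*
 * Ask every valid IP block whether it is hung via its check_soft_reset
 * callback; returns true if any block reports a hang.
 */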
Chunming Zhou63fbf422016-07-15 11:19:20 +08002467static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2468{
2469 int i;
2470 bool asic_hang = false;
2471
2472 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002473 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002474 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002475 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2476 adev->ip_blocks[i].status.hang =
2477 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2478 if (adev->ip_blocks[i].status.hang) {
2479 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002480 asic_hang = true;
2481 }
2482 }
2483 return asic_hang;
2484}
2485
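/* Run the pre_soft_reset callback of every hung IP block. */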
Baoyou Xie4d446652016-09-18 22:09:35 +08002486static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002487{
2488 int i, r = 0;
2489
2490 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002491 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002492 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002493 if (adev->ip_blocks[i].status.hang &&
2494 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2495 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002496 if (r)
2497 return r;
2498 }
2499 }
2500
2501 return 0;
2502}
2503
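/*
 * Returns true if a hung GMC, SMC, ACP or DCE block forces a full ASIC
 * reset rather than a per-block soft reset.
 */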
Chunming Zhou35d782f2016-07-15 15:57:13 +08002504static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2505{
Alex Deucherda146d32016-10-13 16:07:03 -04002506 int i;
2507
2508 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002509 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002510 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002511 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2512 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2513 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2514 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2515 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002516				DRM_INFO("Some blocks need a full reset!\n");
2517 return true;
2518 }
2519 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002520 }
2521 return false;
2522}
2523
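/* Run the soft_reset callback of every hung IP block. */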
2524static int amdgpu_soft_reset(struct amdgpu_device *adev)
2525{
2526 int i, r = 0;
2527
2528 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002529 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002530 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002531 if (adev->ip_blocks[i].status.hang &&
2532 adev->ip_blocks[i].version->funcs->soft_reset) {
2533 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002534 if (r)
2535 return r;
2536 }
2537 }
2538
2539 return 0;
2540}
2541
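/* Run the post_soft_reset callback of every hung IP block. */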
2542static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2543{
2544 int i, r = 0;
2545
2546 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002547 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002548 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002549 if (adev->ip_blocks[i].status.hang &&
2550 adev->ip_blocks[i].version->funcs->post_soft_reset)
2551 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002552 if (r)
2553 return r;
2554 }
2555
2556 return 0;
2557}
2558
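/* Shadow backups are only used on dGPUs and only when a lockup timeout is set. */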
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002559bool amdgpu_need_backup(struct amdgpu_device *adev)
2560{
2561 if (adev->flags & AMD_IS_APU)
2562 return false;
2563
2564	return amdgpu_lockup_timeout > 0;
2565}
2566
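/*
 * Restore a VRAM buffer object from its GTT shadow after a GPU reset.
 * The shadow is validated and re-bound, then the restore is scheduled on
 * @ring; *fence signals when the copy completes.
 */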
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002567static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2568 struct amdgpu_ring *ring,
2569 struct amdgpu_bo *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +01002570 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002571{
2572 uint32_t domain;
2573 int r;
2574
Roger.He23d2e502017-04-21 14:24:26 +08002575 if (!bo->shadow)
2576 return 0;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002577
Alex Xie1d284792017-04-24 13:53:04 -04002578 r = amdgpu_bo_reserve(bo, true);
Roger.He23d2e502017-04-21 14:24:26 +08002579 if (r)
2580 return r;
2581 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2582 /* if bo has been evicted, then no need to recover */
2583 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
Roger.He82521312017-04-21 13:08:43 +08002584 r = amdgpu_bo_validate(bo->shadow);
2585 if (r) {
2586 DRM_ERROR("bo validate failed!\n");
2587 goto err;
2588 }
2589
2590 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2591 if (r) {
2592 DRM_ERROR("%p bind failed\n", bo->shadow);
2593 goto err;
2594 }
2595
Roger.He23d2e502017-04-21 14:24:26 +08002596 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002597 NULL, fence, true);
Roger.He23d2e502017-04-21 14:24:26 +08002598 if (r) {
2599 DRM_ERROR("recover page table failed!\n");
2600 goto err;
2601 }
2602 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002603err:
Roger.He23d2e502017-04-21 14:24:26 +08002604 amdgpu_bo_unreserve(bo);
2605 return r;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002606}
2607
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002608/**
Monk Liua90ad3c2017-01-23 14:22:08 +08002609 * amdgpu_sriov_gpu_reset - reset the asic
2610 *
2611 * @adev: amdgpu device pointer
2612 * @voluntary: true if this reset is requested by the guest,
2613 * false if it is requested by the hypervisor
2614 *
2615 * Attempt to reset the GPU if it has hung (all asics),
2616 * for the SRIOV case.
2617 * Returns 0 for success or an error on failure.
2618 */
2619int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2620{
2621 int i, r = 0;
2622 int resched;
2623 struct amdgpu_bo *bo, *tmp;
2624 struct amdgpu_ring *ring;
2625 struct dma_fence *fence = NULL, *next = NULL;
2626
Monk Liu147b5982017-01-25 15:48:01 +08002627 mutex_lock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002628 atomic_inc(&adev->gpu_reset_counter);
Monk Liu1fb37a32017-01-26 15:36:37 +08002629 adev->gfx.in_reset = true;
Monk Liua90ad3c2017-01-23 14:22:08 +08002630
2631 /* block TTM */
2632 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2633
2634 /* block scheduler */
2635 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2636 ring = adev->rings[i];
2637
2638 if (!ring || !ring->sched.thread)
2639 continue;
2640
2641 kthread_park(ring->sched.thread);
2642 amd_sched_hw_job_reset(&ring->sched);
2643 }
2644
2645 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2646 amdgpu_fence_driver_force_completion(adev);
2647
2648 /* request to take full control of GPU before re-initialization */
2649 if (voluntary)
2650 amdgpu_virt_reset_gpu(adev);
2651 else
2652 amdgpu_virt_request_full_gpu(adev, true);
2653
2654
2655 /* Resume IP prior to SMC */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002656 amdgpu_sriov_reinit_early(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002657
2658	/* we need to recover the GART prior to resuming SMC/CP/SDMA */
2659 amdgpu_ttm_recover_gart(adev);
2660
2661 /* now we are okay to resume SMC/CP/SDMA */
Monk Liue4f0fdc2017-02-09 11:55:49 +08002662 amdgpu_sriov_reinit_late(adev);
Monk Liua90ad3c2017-01-23 14:22:08 +08002663
2664 amdgpu_irq_gpu_reset_resume_helper(adev);
2665
2666	r = amdgpu_ib_ring_tests(adev);
2667	if (r)
2667		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2668
2669 /* release full control of GPU after ib test */
2670 amdgpu_virt_release_full_gpu(adev, true);
2671
2672 DRM_INFO("recover vram bo from shadow\n");
2673
2674 ring = adev->mman.buffer_funcs_ring;
2675 mutex_lock(&adev->shadow_list_lock);
2676 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002677 next = NULL;
Monk Liua90ad3c2017-01-23 14:22:08 +08002678 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2679 if (fence) {
2680 r = dma_fence_wait(fence, false);
2681 if (r) {
2682 WARN(r, "recovery from shadow isn't completed\n");
2683 break;
2684 }
2685 }
2686
2687 dma_fence_put(fence);
2688 fence = next;
2689 }
2690 mutex_unlock(&adev->shadow_list_lock);
2691
2692 if (fence) {
2693 r = dma_fence_wait(fence, false);
2694 if (r)
2695 WARN(r, "recovery from shadow isn't completed\n");
2696 }
2697 dma_fence_put(fence);
2698
2699 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2700 struct amdgpu_ring *ring = adev->rings[i];
2701 if (!ring || !ring->sched.thread)
2702 continue;
2703
2704 amd_sched_job_recovery(&ring->sched);
2705 kthread_unpark(ring->sched.thread);
2706 }
2707
2708 drm_helper_resume_force_mode(adev->ddev);
2709 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2710 if (r) {
2711		/* bad news, how do we tell userspace? */
2712 dev_info(adev->dev, "GPU reset failed\n");
2713 }
2714
Monk Liu1fb37a32017-01-26 15:36:37 +08002715 adev->gfx.in_reset = false;
Monk Liu147b5982017-01-25 15:48:01 +08002716 mutex_unlock(&adev->virt.lock_reset);
Monk Liua90ad3c2017-01-23 14:22:08 +08002717 return r;
2718}
2719
2720/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002721 * amdgpu_gpu_reset - reset the asic
2722 *
2723 * @adev: amdgpu device pointer
2724 *
2725 * Attempt to reset the GPU if it has hung (all asics).
2726 * Returns 0 for success or an error on failure.
2727 */
2728int amdgpu_gpu_reset(struct amdgpu_device *adev)
2729{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002730 int i, r;
2731 int resched;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002732 bool need_full_reset;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002733
Chunming Zhou63fbf422016-07-15 11:19:20 +08002734 if (!amdgpu_check_soft_reset(adev)) {
2735 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2736 return 0;
2737 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002738
Marek Olšákd94aed52015-05-05 21:13:49 +02002739 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002740
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002741 /* block TTM */
2742 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2743
Chunming Zhou0875dc92016-06-12 15:41:58 +08002744 /* block scheduler */
2745 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2746 struct amdgpu_ring *ring = adev->rings[i];
2747
Chunming Zhou51687752017-04-24 17:09:15 +08002748 if (!ring || !ring->sched.thread)
Chunming Zhou0875dc92016-06-12 15:41:58 +08002749 continue;
2750 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002751 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002752 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002753 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2754 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002755
Chunming Zhou35d782f2016-07-15 15:57:13 +08002756 need_full_reset = amdgpu_need_full_reset(adev);
2757
2758 if (!need_full_reset) {
2759 amdgpu_pre_soft_reset(adev);
2760 r = amdgpu_soft_reset(adev);
2761 amdgpu_post_soft_reset(adev);
2762 if (r || amdgpu_check_soft_reset(adev)) {
2763 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2764 need_full_reset = true;
2765 }
2766 }
2767
2768 if (need_full_reset) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08002769 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002770
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002771retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08002772 /* Disable fb access */
2773 if (adev->mode_info.num_crtc) {
2774 struct amdgpu_mode_mc_save save;
2775 amdgpu_display_stop_mc_access(adev, &save);
2776 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2777 }
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002778 if (adev->is_atom_fw)
2779 amdgpu_atomfirmware_scratch_regs_save(adev);
2780 else
2781 amdgpu_atombios_scratch_regs_save(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002782 r = amdgpu_asic_reset(adev);
Alex Deucherbe34d3b2017-03-03 14:26:51 -05002783 if (adev->is_atom_fw)
2784 amdgpu_atomfirmware_scratch_regs_restore(adev);
2785 else
2786 amdgpu_atombios_scratch_regs_restore(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002787 /* post card */
2788 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002789
Chunming Zhou35d782f2016-07-15 15:57:13 +08002790 if (!r) {
2791 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Chunming Zhoufcf06492017-05-05 10:33:33 +08002792 r = amdgpu_resume_phase1(adev);
2793 if (r)
2794 goto out;
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002795 r = amdgpu_ttm_recover_gart(adev);
2796 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002797 goto out;
2798 r = amdgpu_resume_phase2(adev);
2799 if (r)
2800 goto out;
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002801 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08002802 }
2803out:
2804 if (!r) {
2805 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08002806 r = amdgpu_ib_ring_tests(adev);
2807 if (r) {
2808 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002809 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002810 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002811 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002812 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002813 /**
2814		 * recover VM page tables, since we cannot depend on VRAM being
2815		 * consistent after a full GPU reset.
2816 */
2817 if (need_full_reset && amdgpu_need_backup(adev)) {
2818 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2819 struct amdgpu_bo *bo, *tmp;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002820 struct dma_fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002821
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002822 DRM_INFO("recover vram bo from shadow\n");
2823 mutex_lock(&adev->shadow_list_lock);
2824 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
Monk Liu236763d2017-05-01 16:15:31 +08002825 next = NULL;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002826 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2827 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002828 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002829 if (r) {
Monk Liu1d7b17b2017-01-22 18:52:56 +08002830 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002831 break;
2832 }
2833 }
2834
Chris Wilsonf54d1862016-10-25 13:00:45 +01002835 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002836 fence = next;
2837 }
2838 mutex_unlock(&adev->shadow_list_lock);
2839 if (fence) {
Chris Wilsonf54d1862016-10-25 13:00:45 +01002840 r = dma_fence_wait(fence, false);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002841 if (r)
Monk Liu1d7b17b2017-01-22 18:52:56 +08002842 WARN(r, "recovery from shadow isn't completed\n");
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002843 }
Chris Wilsonf54d1862016-10-25 13:00:45 +01002844 dma_fence_put(fence);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002845 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002846 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2847 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08002848
2849 if (!ring || !ring->sched.thread)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002850 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002851
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002852 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002853 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002854 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002855 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002856 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002857 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou51687752017-04-24 17:09:15 +08002858 if (adev->rings[i] && adev->rings[i]->sched.thread) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002859 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002860 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002861 }
2862 }
2863
2864 drm_helper_resume_force_mode(adev->ddev);
2865
2866 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
Chunming Zhou6643be62017-05-05 10:50:09 +08002867 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002868		/* bad news, how do we tell userspace? */
2869 dev_info(adev->dev, "GPU reset failed\n");
Chunming Zhou6643be62017-05-05 10:50:09 +08002870 else
2871 dev_info(adev->dev, "GPU reset successed!\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002872
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002873 return r;
2874}
2875
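/*
 * amdgpu_get_pcie_info - cache the PCIe link capabilities in adev->pm
 *
 * Fills adev->pm.pcie_gen_mask and adev->pm.pcie_mlw_mask with CAIL_* flags.
 * The amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap module parameters, if set,
 * override detection.  Devices sitting directly on a root bus (APUs included)
 * fall back to the AMDGPU_DEFAULT_* masks; everything else is derived from
 * drm_pcie_get_speed_cap_mask() and drm_pcie_get_max_link_width().
 */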
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002876void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2877{
2878 u32 mask;
2879 int ret;
2880
Alex Deuchercd474ba2016-02-04 10:21:23 -05002881 if (amdgpu_pcie_gen_cap)
2882 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2883
2884 if (amdgpu_pcie_lane_cap)
2885 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2886
2887 /* covers APUs as well */
2888 if (pci_is_root_bus(adev->pdev->bus)) {
2889 if (adev->pm.pcie_gen_mask == 0)
2890 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2891 if (adev->pm.pcie_mlw_mask == 0)
2892 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002893 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002894 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05002895
2896 if (adev->pm.pcie_gen_mask == 0) {
2897 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2898 if (!ret) {
2899 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2900 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2901 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2902
2903 if (mask & DRM_PCIE_SPEED_25)
2904 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2905 if (mask & DRM_PCIE_SPEED_50)
2906 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2907 if (mask & DRM_PCIE_SPEED_80)
2908 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2909 } else {
2910 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2911 }
2912 }
2913 if (adev->pm.pcie_mlw_mask == 0) {
2914 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2915 if (!ret) {
2916 switch (mask) {
2917 case 32:
2918 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2919 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2920 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2921 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2922 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2923 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2924 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2925 break;
2926 case 16:
2927 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2928 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2929 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2930 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2931 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2932 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2933 break;
2934 case 12:
2935 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2936 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2937 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2938 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2939 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2940 break;
2941 case 8:
2942 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2943 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2944 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2945 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2946 break;
2947 case 4:
2948 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2949 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2950 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2951 break;
2952 case 2:
2953 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2954 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2955 break;
2956 case 1:
2957 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2958 break;
2959 default:
2960 break;
2961 }
2962 } else {
2963 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002964 }
2965 }
2966}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002967
2968/*
2969 * Debugfs
2970 */
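/*
 * amdgpu_debugfs_add_files - register a table of drm_info_list entries
 *
 * Skips tables that are already registered, rejects registration once
 * AMDGPU_DEBUGFS_MAX_COMPONENTS is reached and, with CONFIG_DEBUG_FS
 * enabled, creates the files under the primary DRM minor's debugfs root.
 */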
2971int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04002972 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002973 unsigned nfiles)
2974{
2975 unsigned i;
2976
2977 for (i = 0; i < adev->debugfs_count; i++) {
2978 if (adev->debugfs[i].files == files) {
2979 /* Already registered */
2980 return 0;
2981 }
2982 }
2983
2984 i = adev->debugfs_count + 1;
2985 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2986 DRM_ERROR("Reached maximum number of debugfs components.\n");
2987		DRM_ERROR("Report this so we can increase "
2988			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2989 return -EINVAL;
2990 }
2991 adev->debugfs[adev->debugfs_count].files = files;
2992 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2993 adev->debugfs_count = i;
2994#if defined(CONFIG_DEBUG_FS)
2995 drm_debugfs_create_files(files, nfiles,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002996 adev->ddev->primary->debugfs_root,
2997 adev->ddev->primary);
2998#endif
2999 return 0;
3000}
3001
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003002#if defined(CONFIG_DEBUG_FS)
3003
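/*
 * The amdgpu_regs file interprets its file offset as follows (see the
 * decode in the read/write handlers below):
 *   bits  0-21: byte offset into the MMIO aperture (dword aligned)
 *   bit     23: take adev->pm.mutex around the access (PG-sensitive regs)
 *   bit     62: enable GRBM banking; bits 24-33 then select the SE,
 *               bits 34-43 the SH and bits 44-53 the instance, with the
 *               value 0x3FF meaning "broadcast/all".
 */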
3004static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3005 size_t size, loff_t *pos)
3006{
Al Viro45063092016-12-04 18:24:56 -05003007 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003008 ssize_t result = 0;
3009 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04003010 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04003011 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003012
3013 if (size & 0x3 || *pos & 0x3)
3014 return -EINVAL;
3015
Tom St Denisbd122672016-07-28 09:39:22 -04003016 /* are we reading registers for which a PG lock is necessary? */
3017 pm_pg_lock = (*pos >> 23) & 1;
3018
Tom St Denis566281592016-06-27 11:55:07 -04003019 if (*pos & (1ULL << 62)) {
3020 se_bank = (*pos >> 24) & 0x3FF;
3021 sh_bank = (*pos >> 34) & 0x3FF;
3022 instance_bank = (*pos >> 44) & 0x3FF;
Tom St Denis32977f92016-10-09 07:41:26 -04003023
3024 if (se_bank == 0x3FF)
3025 se_bank = 0xFFFFFFFF;
3026 if (sh_bank == 0x3FF)
3027 sh_bank = 0xFFFFFFFF;
3028 if (instance_bank == 0x3FF)
3029 instance_bank = 0xFFFFFFFF;
Tom St Denis566281592016-06-27 11:55:07 -04003030 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04003031 } else {
3032 use_bank = 0;
3033 }
3034
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003035 *pos &= (1UL << 22) - 1;
Tom St Denisbd122672016-07-28 09:39:22 -04003036
Tom St Denis566281592016-06-27 11:55:07 -04003037 if (use_bank) {
Tom St Denis32977f92016-10-09 07:41:26 -04003038 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3039 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
Tom St Denis566281592016-06-27 11:55:07 -04003040 return -EINVAL;
3041 mutex_lock(&adev->grbm_idx_mutex);
3042 amdgpu_gfx_select_se_sh(adev, se_bank,
3043 sh_bank, instance_bank);
3044 }
3045
Tom St Denisbd122672016-07-28 09:39:22 -04003046 if (pm_pg_lock)
3047 mutex_lock(&adev->pm.mutex);
3048
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003049 while (size) {
3050 uint32_t value;
3051
3052 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04003053 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003054
3055 value = RREG32(*pos >> 2);
3056 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04003057 if (r) {
3058 result = r;
3059 goto end;
3060 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003061
3062 result += 4;
3063 buf += 4;
3064 *pos += 4;
3065 size -= 4;
3066 }
3067
Tom St Denis566281592016-06-27 11:55:07 -04003068end:
3069 if (use_bank) {
3070 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3071 mutex_unlock(&adev->grbm_idx_mutex);
3072 }
3073
Tom St Denisbd122672016-07-28 09:39:22 -04003074 if (pm_pg_lock)
3075 mutex_unlock(&adev->pm.mutex);
3076
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003077 return result;
3078}
3079
3080static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3081 size_t size, loff_t *pos)
3082{
Al Viro45063092016-12-04 18:24:56 -05003083 struct amdgpu_device *adev = file_inode(f)->i_private;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003084 ssize_t result = 0;
3085 int r;
Tom St Denis394fdde2016-10-10 07:31:23 -04003086 bool pm_pg_lock, use_bank;
3087 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003088
3089 if (size & 0x3 || *pos & 0x3)
3090 return -EINVAL;
3091
Tom St Denis394fdde2016-10-10 07:31:23 -04003092	/* are we writing registers for which a PG lock is necessary? */
3093 pm_pg_lock = (*pos >> 23) & 1;
3094
3095 if (*pos & (1ULL << 62)) {
3096 se_bank = (*pos >> 24) & 0x3FF;
3097 sh_bank = (*pos >> 34) & 0x3FF;
3098 instance_bank = (*pos >> 44) & 0x3FF;
3099
3100 if (se_bank == 0x3FF)
3101 se_bank = 0xFFFFFFFF;
3102 if (sh_bank == 0x3FF)
3103 sh_bank = 0xFFFFFFFF;
3104 if (instance_bank == 0x3FF)
3105 instance_bank = 0xFFFFFFFF;
3106 use_bank = 1;
3107 } else {
3108 use_bank = 0;
3109 }
3110
Tom St Denis801a6aa9a62017-03-15 05:34:25 -04003111 *pos &= (1UL << 22) - 1;
Tom St Denis394fdde2016-10-10 07:31:23 -04003112
3113 if (use_bank) {
3114 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3115 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3116 return -EINVAL;
3117 mutex_lock(&adev->grbm_idx_mutex);
3118 amdgpu_gfx_select_se_sh(adev, se_bank,
3119 sh_bank, instance_bank);
3120 }
3121
3122 if (pm_pg_lock)
3123 mutex_lock(&adev->pm.mutex);
3124
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003125 while (size) {
3126 uint32_t value;
3127
3128 if (*pos > adev->rmmio_size)
3129 return result;
3130
3131 r = get_user(value, (uint32_t *)buf);
3132 if (r)
3133 return r;
3134
3135 WREG32(*pos >> 2, value);
3136
3137 result += 4;
3138 buf += 4;
3139 *pos += 4;
3140 size -= 4;
3141 }
3142
Tom St Denis394fdde2016-10-10 07:31:23 -04003143 if (use_bank) {
3144 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3145 mutex_unlock(&adev->grbm_idx_mutex);
3146 }
3147
3148 if (pm_pg_lock)
3149 mutex_unlock(&adev->pm.mutex);
3150
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003151 return result;
3152}
3153
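/*
 * The PCIE, DIDT and SMC variants below follow the same dword-at-a-time
 * pattern as the MMIO handlers above, but go through the respective
 * indirect register spaces (RREG32_PCIE/WREG32_PCIE and friends) and do
 * not support GRBM banking or the PG lock bit.
 */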
Tom St Denisadcec282016-04-15 13:08:44 -04003154static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3155 size_t size, loff_t *pos)
3156{
Al Viro45063092016-12-04 18:24:56 -05003157 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003158 ssize_t result = 0;
3159 int r;
3160
3161 if (size & 0x3 || *pos & 0x3)
3162 return -EINVAL;
3163
3164 while (size) {
3165 uint32_t value;
3166
3167 value = RREG32_PCIE(*pos >> 2);
3168 r = put_user(value, (uint32_t *)buf);
3169 if (r)
3170 return r;
3171
3172 result += 4;
3173 buf += 4;
3174 *pos += 4;
3175 size -= 4;
3176 }
3177
3178 return result;
3179}
3180
3181static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3182 size_t size, loff_t *pos)
3183{
Al Viro45063092016-12-04 18:24:56 -05003184 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003185 ssize_t result = 0;
3186 int r;
3187
3188 if (size & 0x3 || *pos & 0x3)
3189 return -EINVAL;
3190
3191 while (size) {
3192 uint32_t value;
3193
3194 r = get_user(value, (uint32_t *)buf);
3195 if (r)
3196 return r;
3197
3198 WREG32_PCIE(*pos >> 2, value);
3199
3200 result += 4;
3201 buf += 4;
3202 *pos += 4;
3203 size -= 4;
3204 }
3205
3206 return result;
3207}
3208
3209static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3210 size_t size, loff_t *pos)
3211{
Al Viro45063092016-12-04 18:24:56 -05003212 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003213 ssize_t result = 0;
3214 int r;
3215
3216 if (size & 0x3 || *pos & 0x3)
3217 return -EINVAL;
3218
3219 while (size) {
3220 uint32_t value;
3221
3222 value = RREG32_DIDT(*pos >> 2);
3223 r = put_user(value, (uint32_t *)buf);
3224 if (r)
3225 return r;
3226
3227 result += 4;
3228 buf += 4;
3229 *pos += 4;
3230 size -= 4;
3231 }
3232
3233 return result;
3234}
3235
3236static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3237 size_t size, loff_t *pos)
3238{
Al Viro45063092016-12-04 18:24:56 -05003239 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003240 ssize_t result = 0;
3241 int r;
3242
3243 if (size & 0x3 || *pos & 0x3)
3244 return -EINVAL;
3245
3246 while (size) {
3247 uint32_t value;
3248
3249 r = get_user(value, (uint32_t *)buf);
3250 if (r)
3251 return r;
3252
3253 WREG32_DIDT(*pos >> 2, value);
3254
3255 result += 4;
3256 buf += 4;
3257 *pos += 4;
3258 size -= 4;
3259 }
3260
3261 return result;
3262}
3263
3264static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3265 size_t size, loff_t *pos)
3266{
Al Viro45063092016-12-04 18:24:56 -05003267 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003268 ssize_t result = 0;
3269 int r;
3270
3271 if (size & 0x3 || *pos & 0x3)
3272 return -EINVAL;
3273
3274 while (size) {
3275 uint32_t value;
3276
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003277 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04003278 r = put_user(value, (uint32_t *)buf);
3279 if (r)
3280 return r;
3281
3282 result += 4;
3283 buf += 4;
3284 *pos += 4;
3285 size -= 4;
3286 }
3287
3288 return result;
3289}
3290
3291static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3292 size_t size, loff_t *pos)
3293{
Al Viro45063092016-12-04 18:24:56 -05003294 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denisadcec282016-04-15 13:08:44 -04003295 ssize_t result = 0;
3296 int r;
3297
3298 if (size & 0x3 || *pos & 0x3)
3299 return -EINVAL;
3300
3301 while (size) {
3302 uint32_t value;
3303
3304 r = get_user(value, (uint32_t *)buf);
3305 if (r)
3306 return r;
3307
Tom St Denis6fc0dea2016-08-29 08:39:29 -04003308 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04003309
3310 result += 4;
3311 buf += 4;
3312 *pos += 4;
3313 size -= 4;
3314 }
3315
3316 return result;
3317}
3318
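/*
 * amdgpu_gca_config dumps a versioned array of GCA configuration values.
 * config[0] is a format version (3 at this point) and is bumped whenever
 * new fields are appended, so userspace can tell which entries are valid.
 */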
Tom St Denis1e051412016-06-27 09:57:18 -04003319static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3320 size_t size, loff_t *pos)
3321{
Al Viro45063092016-12-04 18:24:56 -05003322 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis1e051412016-06-27 09:57:18 -04003323 ssize_t result = 0;
3324 int r;
3325 uint32_t *config, no_regs = 0;
3326
3327 if (size & 0x3 || *pos & 0x3)
3328 return -EINVAL;
3329
Markus Elfringecab7662016-09-18 17:00:52 +02003330 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04003331 if (!config)
3332 return -ENOMEM;
3333
3334 /* version, increment each time something is added */
Tom St Denis9a999352017-01-18 13:01:25 -05003335 config[no_regs++] = 3;
Tom St Denis1e051412016-06-27 09:57:18 -04003336 config[no_regs++] = adev->gfx.config.max_shader_engines;
3337 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3338 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3339 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3340 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3341 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3342 config[no_regs++] = adev->gfx.config.max_gprs;
3343 config[no_regs++] = adev->gfx.config.max_gs_threads;
3344 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3345 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3346 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3347 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3348 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3349 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3350 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3351 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3352 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3353 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3354 config[no_regs++] = adev->gfx.config.num_gpus;
3355 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3356 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3357 config[no_regs++] = adev->gfx.config.gb_addr_config;
3358 config[no_regs++] = adev->gfx.config.num_rbs;
3359
Tom St Denis89a8f302016-08-12 15:14:31 -04003360 /* rev==1 */
3361 config[no_regs++] = adev->rev_id;
3362 config[no_regs++] = adev->pg_flags;
3363 config[no_regs++] = adev->cg_flags;
3364
Tom St Denise9f11dc2016-08-17 12:00:51 -04003365 /* rev==2 */
3366 config[no_regs++] = adev->family;
3367 config[no_regs++] = adev->external_rev_id;
3368
Tom St Denis9a999352017-01-18 13:01:25 -05003369 /* rev==3 */
3370 config[no_regs++] = adev->pdev->device;
3371 config[no_regs++] = adev->pdev->revision;
3372 config[no_regs++] = adev->pdev->subsystem_device;
3373 config[no_regs++] = adev->pdev->subsystem_vendor;
3374
Tom St Denis1e051412016-06-27 09:57:18 -04003375 while (size && (*pos < no_regs * 4)) {
3376 uint32_t value;
3377
3378 value = config[*pos >> 2];
3379 r = put_user(value, (uint32_t *)buf);
3380 if (r) {
3381 kfree(config);
3382 return r;
3383 }
3384
3385 result += 4;
3386 buf += 4;
3387 *pos += 4;
3388 size -= 4;
3389 }
3390
3391 kfree(config);
3392 return result;
3393}
3394
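/*
 * amdgpu_sensors: the file offset divided by four selects the sensor index,
 * which is handed to the powerplay (or legacy dpm) read_sensor callback;
 * up to 16 dwords of sensor data are copied back to userspace.
 */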
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003395static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3396 size_t size, loff_t *pos)
3397{
Al Viro45063092016-12-04 18:24:56 -05003398 struct amdgpu_device *adev = file_inode(f)->i_private;
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003399 int idx, x, outsize, r, valuesize;
3400 uint32_t values[16];
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003401
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003402 if (size & 3 || *pos & 0x3)
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003403 return -EINVAL;
3404
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003405 if (amdgpu_dpm == 0)
3406 return -EINVAL;
3407
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003408 /* convert offset to sensor number */
3409 idx = *pos >> 2;
3410
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003411 valuesize = sizeof(values);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003412 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003413 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
Samuel Pitoiset3cbc6142017-02-15 19:32:29 +01003414 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3415 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3416 &valuesize);
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003417 else
3418 return -EINVAL;
3419
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003420 if (size > valuesize)
3421 return -EINVAL;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003422
Tom St Denis9f8df7d2017-02-09 14:29:01 -05003423 outsize = 0;
3424 x = 0;
3425 if (!r) {
3426 while (size) {
3427 r = put_user(values[x++], (int32_t *)buf);
3428 buf += 4;
3429 size -= 4;
3430 outsize += 4;
3431 }
3432 }
3433
3434 return !r ? outsize : r;
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003435}
Tom St Denis1e051412016-06-27 09:57:18 -04003436
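/*
 * amdgpu_wave: the file offset packs the wave selector (see the decode
 * below): bits 0-6 give the byte offset into the returned wave data,
 * followed by SE, SH, CU, wave and SIMD fields starting at bit positions
 * 7, 15, 23, 31 and 37 respectively.
 */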
Tom St Denis273d7aa2016-10-11 14:48:55 -04003437static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3438 size_t size, loff_t *pos)
3439{
3440 struct amdgpu_device *adev = f->f_inode->i_private;
3441 int r, x;
3442	ssize_t result = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003443 uint32_t offset, se, sh, cu, wave, simd, data[32];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003444
3445 if (size & 3 || *pos & 3)
3446 return -EINVAL;
3447
3448 /* decode offset */
3449 offset = (*pos & 0x7F);
3450 se = ((*pos >> 7) & 0xFF);
3451 sh = ((*pos >> 15) & 0xFF);
3452 cu = ((*pos >> 23) & 0xFF);
3453 wave = ((*pos >> 31) & 0xFF);
3454 simd = ((*pos >> 37) & 0xFF);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003455
3456 /* switch to the specific se/sh/cu */
3457 mutex_lock(&adev->grbm_idx_mutex);
3458 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3459
3460 x = 0;
Tom St Denis472259f2016-10-14 09:49:09 -04003461 if (adev->gfx.funcs->read_wave_data)
3462 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
Tom St Denis273d7aa2016-10-11 14:48:55 -04003463
3464 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3465 mutex_unlock(&adev->grbm_idx_mutex);
3466
Tom St Denis5ecfb3b2016-10-13 12:15:03 -04003467 if (!x)
3468 return -EINVAL;
3469
Tom St Denis472259f2016-10-14 09:49:09 -04003470 while (size && (offset < x * 4)) {
Tom St Denis273d7aa2016-10-11 14:48:55 -04003471 uint32_t value;
3472
Tom St Denis472259f2016-10-14 09:49:09 -04003473 value = data[offset >> 2];
Tom St Denis273d7aa2016-10-11 14:48:55 -04003474 r = put_user(value, (uint32_t *)buf);
3475 if (r)
3476 return r;
3477
3478 result += 4;
3479 buf += 4;
Tom St Denis472259f2016-10-14 09:49:09 -04003480 offset += 4;
Tom St Denis273d7aa2016-10-11 14:48:55 -04003481 size -= 4;
3482 }
3483
3484 return result;
3485}
3486
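/*
 * amdgpu_gpr: bits 0-11 of the file offset give the dword offset into the
 * GPR data, followed by SE, SH, CU, wave, SIMD and thread fields starting
 * at bit positions 12, 20, 28, 36, 44 and 52; bit 60 selects SGPRs (1)
 * rather than VGPRs (0).
 */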
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003487static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3488 size_t size, loff_t *pos)
3489{
3490 struct amdgpu_device *adev = f->f_inode->i_private;
3491 int r;
3492 ssize_t result = 0;
3493 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3494
3495 if (size & 3 || *pos & 3)
3496 return -EINVAL;
3497
3498 /* decode offset */
3499 offset = (*pos & 0xFFF); /* in dwords */
3500 se = ((*pos >> 12) & 0xFF);
3501 sh = ((*pos >> 20) & 0xFF);
3502 cu = ((*pos >> 28) & 0xFF);
3503 wave = ((*pos >> 36) & 0xFF);
3504 simd = ((*pos >> 44) & 0xFF);
3505 thread = ((*pos >> 52) & 0xFF);
3506 bank = ((*pos >> 60) & 1);
3507
3508 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3509 if (!data)
3510 return -ENOMEM;
3511
3512 /* switch to the specific se/sh/cu */
3513 mutex_lock(&adev->grbm_idx_mutex);
3514 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3515
3516 if (bank == 0) {
3517 if (adev->gfx.funcs->read_wave_vgprs)
3518 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3519 } else {
3520 if (adev->gfx.funcs->read_wave_sgprs)
3521 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3522 }
3523
3524 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3525 mutex_unlock(&adev->grbm_idx_mutex);
3526
3527 while (size) {
3528 uint32_t value;
3529
3530 value = data[offset++];
3531 r = put_user(value, (uint32_t *)buf);
3532 if (r) {
3533 result = r;
3534 goto err;
3535 }
3536
3537 result += 4;
3538 buf += 4;
3539 size -= 4;
3540 }
3541
3542err:
3543 kfree(data);
3544 return result;
3545}
3546
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003547static const struct file_operations amdgpu_debugfs_regs_fops = {
3548 .owner = THIS_MODULE,
3549 .read = amdgpu_debugfs_regs_read,
3550 .write = amdgpu_debugfs_regs_write,
3551 .llseek = default_llseek
3552};
Tom St Denisadcec282016-04-15 13:08:44 -04003553static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3554 .owner = THIS_MODULE,
3555 .read = amdgpu_debugfs_regs_didt_read,
3556 .write = amdgpu_debugfs_regs_didt_write,
3557 .llseek = default_llseek
3558};
3559static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3560 .owner = THIS_MODULE,
3561 .read = amdgpu_debugfs_regs_pcie_read,
3562 .write = amdgpu_debugfs_regs_pcie_write,
3563 .llseek = default_llseek
3564};
3565static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3566 .owner = THIS_MODULE,
3567 .read = amdgpu_debugfs_regs_smc_read,
3568 .write = amdgpu_debugfs_regs_smc_write,
3569 .llseek = default_llseek
3570};
3571
Tom St Denis1e051412016-06-27 09:57:18 -04003572static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3573 .owner = THIS_MODULE,
3574 .read = amdgpu_debugfs_gca_config_read,
3575 .llseek = default_llseek
3576};
3577
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003578static const struct file_operations amdgpu_debugfs_sensors_fops = {
3579 .owner = THIS_MODULE,
3580 .read = amdgpu_debugfs_sensor_read,
3581 .llseek = default_llseek
3582};
3583
Tom St Denis273d7aa2016-10-11 14:48:55 -04003584static const struct file_operations amdgpu_debugfs_wave_fops = {
3585 .owner = THIS_MODULE,
3586 .read = amdgpu_debugfs_wave_read,
3587 .llseek = default_llseek
3588};
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003589static const struct file_operations amdgpu_debugfs_gpr_fops = {
3590 .owner = THIS_MODULE,
3591 .read = amdgpu_debugfs_gpr_read,
3592 .llseek = default_llseek
3593};
Tom St Denis273d7aa2016-10-11 14:48:55 -04003594
Tom St Denisadcec282016-04-15 13:08:44 -04003595static const struct file_operations *debugfs_regs[] = {
3596 &amdgpu_debugfs_regs_fops,
3597 &amdgpu_debugfs_regs_didt_fops,
3598 &amdgpu_debugfs_regs_pcie_fops,
3599 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04003600 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003601 &amdgpu_debugfs_sensors_fops,
Tom St Denis273d7aa2016-10-11 14:48:55 -04003602 &amdgpu_debugfs_wave_fops,
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003603 &amdgpu_debugfs_gpr_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04003604};
3605
3606static const char *debugfs_regs_names[] = {
3607 "amdgpu_regs",
3608 "amdgpu_regs_didt",
3609 "amdgpu_regs_pcie",
3610 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04003611 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04003612 "amdgpu_sensors",
Tom St Denis273d7aa2016-10-11 14:48:55 -04003613 "amdgpu_wave",
Tom St Denisc5a60ce2016-12-05 11:39:19 -05003614 "amdgpu_gpr",
Tom St Denisadcec282016-04-15 13:08:44 -04003615};
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003616
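/*
 * amdgpu_debugfs_regs_init creates the files named above under the primary
 * DRM minor's debugfs directory (typically /sys/kernel/debug/dri/<minor>/).
 * A minimal usage sketch, assuming debugfs is mounted in the usual place
 * and the card is minor 0: to read the register at dword offset REG via
 * the "amdgpu_regs" file, seek to REG * 4 bytes and read 4 bytes, e.g.
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=$((REG)) | xxd
 *
 * (dd's skip is in units of bs, so this lands on the dword-aligned byte
 * offset the read handler expects.)
 */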
3617static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3618{
3619 struct drm_minor *minor = adev->ddev->primary;
3620 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04003621 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003622
Tom St Denisadcec282016-04-15 13:08:44 -04003623 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3624 ent = debugfs_create_file(debugfs_regs_names[i],
3625 S_IFREG | S_IRUGO, root,
3626 adev, debugfs_regs[i]);
3627 if (IS_ERR(ent)) {
3628 for (j = 0; j < i; j++) {
3629				debugfs_remove(adev->debugfs_regs[j]);
3630				adev->debugfs_regs[j] = NULL;
3631 }
3632 return PTR_ERR(ent);
3633 }
3634
3635 if (!i)
3636 i_size_write(ent->d_inode, adev->rmmio_size);
3637 adev->debugfs_regs[i] = ent;
3638 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003639
3640 return 0;
3641}
3642
3643static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3644{
Tom St Denisadcec282016-04-15 13:08:44 -04003645 unsigned i;
3646
3647 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3648 if (adev->debugfs_regs[i]) {
3649 debugfs_remove(adev->debugfs_regs[i]);
3650 adev->debugfs_regs[i] = NULL;
3651 }
3652 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003653}
3654
3655int amdgpu_debugfs_init(struct drm_minor *minor)
3656{
3657 return 0;
3658}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06003659#else
3660static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3661{
3662 return 0;
3663}
3664static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003665#endif